| prompt (stringlengths 162 – 4.26M) | response (stringlengths 109 – 5.16M) |
|---|---|
Generate the Verilog code corresponding to the following Chisel files.
File PermuteSequencer.scala:
package saturn.backend
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import saturn.common._
import saturn.insns._
class PermuteSequencer(exu_insns: Seq[VectorInstruction])(implicit p: Parameters) extends PipeSequencer(new PermuteMicroOp)(p) {
def accepts(inst: VectorIssueInst) = {
val needs_mask = inst.vmu && (!inst.vm && inst.mop =/= mopUnit)
val needs_index = inst.vmu && inst.mop(0)
val arith = !inst.vmu && new VectorDecoder(inst.funct3, inst.funct6, inst.rs1, inst.rs2, exu_insns.filter(_.props.contains(UsesPermuteSeq.Y)), Nil).matched
needs_mask || needs_index || arith
}
val valid = RegInit(false.B)
val inst = Reg(new BackendIssueInst)
val eidx = Reg(UInt(log2Ceil(maxVLMax).W))
val rvs2_mask = Reg(UInt(egsTotal.W))
val rvm_mask = Reg(UInt(egsPerVReg.W))
val head = Reg(Bool())
val slide_offset = Reg(UInt((1+log2Ceil(maxVLMax)).W))
val slide = !inst.vmu && inst.funct3 =/= OPIVV
val slide_up = !inst.funct6(0)
val rs2 = Mux(inst.rs1_is_rs2, inst.rs1, inst.rs2)
val gatherei16 = inst.funct3 === OPIVV && inst.opif6 === OPIFunct6.rgatherei16
val renvm = inst.renvm
val renv2 = inst.renv2
val incr_eew = Mux(inst.vmu, inst.mem_idx_size,
Mux(gatherei16, 1.U, inst.vconfig.vtype.vsew))
val eff_vl = Mux(slide,
Mux(slide_up, inst.vconfig.vl - slide_offset, min(inst.vconfig.vtype.vlMax, inst.vconfig.vl + slide_offset)),
inst.vconfig.vl
)(log2Ceil(maxVLMax),0)
val next_eidx = get_next_eidx(eff_vl, eidx, incr_eew, 0.U, false.B, false.B)
val tail = next_eidx === eff_vl
io.dis.ready := !valid || (tail && io.iss.fire) && !io.dis_stall
when (io.dis.fire) {
val iss_inst = io.dis.bits
val offset = Mux(iss_inst.isOpi, get_max_offset(Mux(iss_inst.funct3(2), iss_inst.rs1_data, iss_inst.imm5)), 1.U)
val slide = !iss_inst.vmu && iss_inst.funct3 =/= OPIVV
val slide_up = !iss_inst.funct6(0)
val slide_start = Mux(slide_up, 0.U, offset)
val vlmax = iss_inst.vconfig.vtype.vlMax
val slide_no_read = Mux(slide_up,
iss_inst.vconfig.vl <= offset,
offset >= vlmax)
valid := Mux(!slide, true.B, !slide_no_read)
inst := iss_inst
eidx := Mux(!slide, iss_inst.vstart, slide_start)
slide_offset := offset
val rs2 = Mux(iss_inst.rs1_is_rs2, iss_inst.rs1, iss_inst.rs2)
val renv2_arch_mask = get_arch_mask(rs2, iss_inst.emul)
rvs2_mask := Mux(iss_inst.renv2, FillInterleaved(egsPerVReg, renv2_arch_mask), 0.U)
rvm_mask := Mux(iss_inst.renvm, ~(0.U(egsPerVReg.W)), 0.U)
head := true.B
} .elsewhen (io.iss.fire) {
valid := !tail
head := false.B
}
io.vat := inst.vat
io.seq_hazard.valid := valid
io.seq_hazard.bits.rintent := hazardMultiply(rvs2_mask | rvm_mask)
io.seq_hazard.bits.wintent := false.B
io.seq_hazard.bits.vat := inst.vat
val vs2_read_oh = Mux(renv2, UIntToOH(io.rvs2.req.bits.eg), 0.U)
val vm_read_oh = Mux(renvm, UIntToOH(io.rvm.req.bits.eg), 0.U)
val raw_hazard = ((vm_read_oh | vs2_read_oh) & io.older_writes) =/= 0.U
val data_hazard = raw_hazard
val oldest = inst.vat === io.vat_head
io.rvs2.req.valid := valid && renv2
io.rvs2.req.bits.eg := getEgId(rs2, eidx, incr_eew, false.B)
io.rvs2.req.bits.oldest := oldest
io.rvm.req.valid := valid && renvm
io.rvm.req.bits.eg := getEgId(0.U, eidx, 0.U, true.B)
io.rvm.req.bits.oldest := oldest
io.iss.valid := valid && !data_hazard && (!renvm || io.rvm.req.ready) && (!renv2 || io.rvs2.req.ready)
io.iss.bits.renv2 := renv2
io.iss.bits.renvm := renvm
io.iss.bits.rvs2_data := io.rvs2.resp
io.iss.bits.rvs2_eew := incr_eew
io.iss.bits.eidx := eidx
io.iss.bits.vl := eff_vl
io.iss.bits.rvm_data := Mux(renvm, io.rvm.resp, ~(0.U(dLen.W)))
io.iss.bits.vmu := inst.vmu
io.iss.bits.tail := tail
when (io.iss.fire && !tail) {
when (next_is_new_eg(eidx, next_eidx, incr_eew, false.B) && vParams.enableChaining.B) {
rvs2_mask := rvs2_mask & ~vs2_read_oh
}
when (next_is_new_eg(eidx, next_eidx, 0.U, true.B) && vParams.enableChaining.B) {
rvm_mask := rvm_mask & ~UIntToOH(io.rvm.req.bits.eg)
}
eidx := next_eidx
}
io.busy := valid
io.head := head
}
File Parameters.scala:
package saturn.common
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util._
import freechips.rocketchip.tile._
import freechips.rocketchip.diplomacy.{BufferParams}
import saturn.exu._
object VectorParams {
// minParams:
// For a very small area-efficient vector unit with iterative
// and element-wise functional units
def minParams = VectorParams()
// refParams
// For a standard modestly capable small vector unit with
// SIMD functional units
def refParams = minParams.copy(
vlrobEntries = 4,
vlissqEntries = 3,
vsissqEntries = 3,
vxissqEntries = 3,
vatSz = 5,
useSegmentedIMul = true,
doubleBufferSegments = true,
useScalarFPFMA = false,
vrfBanking = 4,
)
// dspParams
// For a wide high-performance vector unit with multi-issue
def dspParams = refParams.copy(
issStructure = VectorIssueStructure.Shared
)
// genParams:
// For a vector unit that performs better on less-optimized
// code sequences
def genParams = dspParams.copy(
issStructure = VectorIssueStructure.Split,
vlifqEntries = 16,
vlrobEntries = 16
)
// multiFMAParams:
// Provides a second sequencer and set of functional units for FMA operations
def multiFMAParams = genParams.copy(
issStructure = VectorIssueStructure.MultiFMA
)
// multiMACParams:
// Provides a second sequencer and set of functional units for integer MAC operations
def multiMACParams = genParams.copy(
issStructure = VectorIssueStructure.MultiMAC
)
// dmaParams:
// For a vector unit that only does memcpys, with no arithmetic
def dmaParams = VectorParams(
vdqEntries = 2,
vliqEntries = 4,
vsiqEntries = 4,
vlifqEntries = 32,
vlrobEntries = 4,
vsifqEntries = 32,
vlissqEntries = 2,
vsissqEntries = 1,
vrfBanking = 1,
useIterativeIMul = true
)
// The parameters below are approximations
// hwaParams
// For a vector unit with limited sequencer slots akin to Hwacha
def hwaParams = genParams.copy(
vatSz = 3, // 8 mseq Entries
vdqEntries = 1,
vlissqEntries = 8,
vsissqEntries = 8,
vxissqEntries = 8,
vpissqEntries = 8,
hwachaLimiter = Some(8), // sequencer slots
)
// lgvParams
// For a vector unit with very long vector lengths
def lgvParams = VectorParams(
vatSz = 5,
vlifqEntries = 32,
vsifqEntries = 32,
vlrobEntries = 32,
vlissqEntries = 8,
vsissqEntries = 8,
vxissqEntries = 8,
vpissqEntries = 8,
useSegmentedIMul = true,
useScalarFPMisc = false,
useScalarFPFMA = false,
vrfBanking = 4,
issStructure = VectorIssueStructure.Split
)
}
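These presets are ordinary case-class instances, so a configuration is usually derived by copying one of them and overriding individual fields. A minimal sketch (the overridden values below are illustrative, not recommendations):

```scala
import saturn.common.VectorParams

// Start from the reference configuration and override individual fields;
// any field of the VectorParams case class can be adjusted this way.
val myParams = VectorParams.refParams.copy(
  dLen  = 128, // widen the datapath (default is 64)
  vatSz = 5    // allow more in-flight vector instructions
)
```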
case class VXSequencerParams(
name: String,
fus: Seq[FunctionalUnitFactory]
) {
def insns = fus.map(_.insns).flatten
}
case class VXIssuePathParams(
name: String,
depth: Int,
seqs: Seq[VXSequencerParams]
) {
def insns = seqs.map(_.insns).flatten
}
object VXFunctionalUnitGroups {
def integerFUs(idivDoesImul: Boolean = false) = Seq(
IntegerPipeFactory,
ShiftPipeFactory,
BitwisePipeFactory,
IntegerDivideFactory(idivDoesImul),
MaskUnitFactory,
PermuteUnitFactory
)
def integerMAC(pipeDepth: Int, useSegmented: Boolean) = Seq(
IntegerMultiplyFactory(pipeDepth, useSegmented)
)
def allIntegerFUs(idivDoesImul: Boolean, imaDepth: Int, useSegmentedImul: Boolean) = (
integerFUs(idivDoesImul) ++ integerMAC(imaDepth, useSegmentedImul)
)
def sharedFPFMA(pipeDepth: Int) = Seq(
FPFMAFactory(pipeDepth, true)
)
def sharedFPMisc = Seq(
SharedFPMiscFactory
)
def fpFMA(pipeDepth: Int) = Seq(
FPFMAFactory(pipeDepth, false)
)
def fpMisc = Seq(
FPDivSqrtFactory,
FPCmpFactory,
FPConvFactory
)
def allFPFUs(fmaPipeDepth: Int, useScalarFPFMA: Boolean, useScalarFPMisc: Boolean) = (
(if (useScalarFPFMA) sharedFPFMA(fmaPipeDepth) else fpFMA(fmaPipeDepth)) ++
(if (useScalarFPMisc) sharedFPMisc else fpMisc)
)
}
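These helpers only concatenate factory lists, so a sequencer's functional-unit mix is just a Seq of factories. A small sketch using the definitions above (the pipeline depths mirror the VectorParams defaults further down):

```scala
import saturn.common.VXFunctionalUnitGroups._

// Integer FUs plus a 3-stage segmented integer multiplier, as used by the
// "int" sequencers in the Shared/Split structures below.
val intFUs = allIntegerFUs(idivDoesImul = false, imaDepth = 3, useSegmentedImul = true)

// FP units built around a dedicated 4-stage vector FMA pipeline.
val fpFUs = allFPFUs(fmaPipeDepth = 4, useScalarFPFMA = false, useScalarFPMisc = false)
```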
sealed trait VectorIssueStructure {
def generate(params: VectorParams): Seq[VXIssuePathParams]
}
object VectorIssueStructure {
import VXFunctionalUnitGroups._
case object Unified extends VectorIssueStructure {
def generate(params: VectorParams) = {
val fp_int_path = VXIssuePathParams(
name = "fp_int",
depth = params.vxissqEntries,
seqs = Seq(
VXSequencerParams("fp_int", (
allIntegerFUs(params.useIterativeIMul, params.imaPipeDepth, params.useSegmentedIMul) ++
allFPFUs(params.fmaPipeDepth, params.useScalarFPFMA, params.useScalarFPMisc)
))
)
)
Seq(fp_int_path)
}
}
case object Shared extends VectorIssueStructure {
def generate(params: VectorParams) = {
val fp_int_path = VXIssuePathParams(
name = "fp_int",
depth = params.vxissqEntries,
seqs = Seq(
VXSequencerParams("int", allIntegerFUs(params.useIterativeIMul, params.imaPipeDepth, params.useSegmentedIMul)),
VXSequencerParams("fp", allFPFUs(params.fmaPipeDepth, params.useScalarFPFMA, params.useScalarFPMisc))
)
)
Seq(fp_int_path)
}
}
case object Split extends VectorIssueStructure {
def generate(params: VectorParams) = {
val int_path = VXIssuePathParams(
name = "int",
depth = params.vxissqEntries,
seqs = Seq(
VXSequencerParams("int", allIntegerFUs(params.useIterativeIMul, params.imaPipeDepth, params.useSegmentedIMul)),
)
)
val fp_path = VXIssuePathParams(
name = "fp",
depth = params.vxissqEntries,
seqs = Seq(
VXSequencerParams("fp", allFPFUs(params.fmaPipeDepth, params.useScalarFPFMA, params.useScalarFPMisc))
)
)
Seq(int_path, fp_path)
}
}
case object MultiFMA extends VectorIssueStructure {
def generate(params: VectorParams) = {
require(!params.useScalarFPFMA)
val int_path = VXIssuePathParams(
name = "int",
depth = params.vxissqEntries,
seqs = Seq(
VXSequencerParams("int", allIntegerFUs(params.useIterativeIMul, params.imaPipeDepth, params.useSegmentedIMul)),
)
)
val fp_path = VXIssuePathParams(
name = "fp",
depth = params.vxissqEntries,
seqs = Seq(
VXSequencerParams("fp0", allFPFUs(params.fmaPipeDepth, params.useScalarFPFMA, params.useScalarFPMisc)),
VXSequencerParams("fp1", fpFMA(params.fmaPipeDepth))
)
)
Seq(int_path, fp_path)
}
}
case object MultiMAC extends VectorIssueStructure {
def generate(params: VectorParams) = {
require(!params.useIterativeIMul && params.useSegmentedIMul)
val int_path = VXIssuePathParams(
name = "int",
depth = params.vxissqEntries,
seqs = Seq(
VXSequencerParams("int0", allIntegerFUs(params.useIterativeIMul, params.imaPipeDepth, params.useSegmentedIMul)),
VXSequencerParams("int1", integerMAC(params.imaPipeDepth, params.useSegmentedIMul))
)
)
val fp_path = VXIssuePathParams(
name = "fp",
depth = params.vxissqEntries,
seqs = Seq(
VXSequencerParams("fp", allFPFUs(params.fmaPipeDepth, params.useScalarFPFMA, params.useScalarFPMisc))
)
)
Seq(int_path, fp_path)
}
}
}
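Each structure expands, for a given VectorParams, into a concrete list of issue paths and sequencers. A quick way to inspect what a structure produces (a sketch; the output shown is for the Split structure selected by genParams):

```scala
import saturn.common.VectorParams

val params = VectorParams.genParams               // genParams uses VectorIssueStructure.Split
val paths  = params.issStructure.generate(params)
println(paths.map(p => p.name -> p.seqs.map(_.name)))
// List((int,List(int)), (fp,List(fp)))
```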
case class VectorParams(
// In-order dispatch Queue
vdqEntries: Int = 4,
// Load store instruction queues (in VLSU)
vliqEntries: Int = 4,
vsiqEntries: Int = 4,
// Load store in-flight queues (in VLSU)
vlifqEntries: Int = 8,
vsifqEntries: Int = 16,
vlrobEntries: Int = 2,
// Scatter-gather engine params
vsgPorts: Int = 8,
vsgifqEntries: Int = 4,
vsgBuffers: Int = 3,
// Load/store/execute/permute/maskindex issue queues
vlissqEntries: Int = 0,
vsissqEntries: Int = 0,
vxissqEntries: Int = 0,
vpissqEntries: Int = 0,
dLen: Int = 64,
vatSz: Int = 3,
useSegmentedIMul: Boolean = false,
useScalarFPFMA: Boolean = true, // Use the shared scalar FPU for FP FMA instructions
useScalarFPMisc: Boolean = true, // Use the shared scalar FPU for all non-FMA FP instructions
useIterativeIMul: Boolean = false,
fmaPipeDepth: Int = 4,
imaPipeDepth: Int = 3,
// for comparisons only
hazardingMultiplier: Int = 0,
hwachaLimiter: Option[Int] = None,
enableChaining: Boolean = true,
latencyInject: Boolean = false,
enableDAE: Boolean = true,
enableOOO: Boolean = true,
enableScalarVectorAddrDisambiguation: Boolean = true,
doubleBufferSegments: Boolean = false,
vrfBanking: Int = 2,
vrfHiccupBuffer: Boolean = true,
issStructure: VectorIssueStructure = VectorIssueStructure.Unified,
tlBuffer: BufferParams = BufferParams.default,
) {
def supported_ex_insns = issStructure.generate(this).map(_.insns).flatten
}
case object VectorParamsKey extends Field[VectorParams]
trait HasVectorParams extends HasVectorConsts { this: HasCoreParameters =>
implicit val p: Parameters
def vParams: VectorParams = p(VectorParamsKey)
def dLen = vParams.dLen
def dLenB = dLen / 8
def dLenOffBits = log2Ceil(dLenB)
def dmemTagBits = log2Ceil(vParams.vlifqEntries.max(vParams.vsifqEntries))
def sgmemTagBits = log2Ceil(vParams.vsgifqEntries)
def egsPerVReg = vLen / dLen
def egsTotal = (vLen / dLen) * 32
def vrfBankBits = log2Ceil(vParams.vrfBanking)
def lsiqIdBits = log2Ceil(vParams.vliqEntries.max(vParams.vsiqEntries))
val debugIdSz = 16
val nRelease = vParams.issStructure match {
case VectorIssueStructure.Unified => 3
case VectorIssueStructure.Shared | VectorIssueStructure.Split => 4
case VectorIssueStructure.MultiFMA | VectorIssueStructure.MultiMAC => 5
}
def getEgId(vreg: UInt, eidx: UInt, eew: UInt, bitwise: Bool): UInt = {
val base = vreg << log2Ceil(egsPerVReg)
val off = eidx >> Mux(bitwise, log2Ceil(dLen).U, (log2Ceil(dLenB).U - eew))
base +& off
}
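getEgId maps a (vreg, eidx) pair onto an element-group index: the register supplies the base group, and eidx is shifted down by log2 of the number of elements (or bits, for mask accesses) that fit in one dLen-wide group. A plain-Scala sketch of the same arithmetic, assuming dLen = 64 and vLen = 256 (so dLenB = 8 and egsPerVReg = 4):

```scala
// Software model of getEgId (not Chisel); the dLen/vLen values are assumptions.
object GetEgIdExample extends App {
  val dLen = 64; val vLen = 256
  val dLenB = dLen / 8         // 8 bytes per element group
  val egsPerVReg = vLen / dLen // 4 element groups per vector register
  def log2Ceil(x: Int): Int = 32 - Integer.numberOfLeadingZeros(x - 1)

  def getEgId(vreg: Int, eidx: Int, eew: Int, bitwise: Boolean): Int = {
    val base = vreg << log2Ceil(egsPerVReg)
    val off  = if (bitwise) eidx >> log2Ceil(dLen) else eidx >> (log2Ceil(dLenB) - eew)
    base + off
  }

  // eew = 2 (32-bit elements): two elements per 64-bit group, so eidx = 5 lands in group 2 of v8.
  println(getEgId(vreg = 8, eidx = 5, eew = 2, bitwise = false)) // 8*4 + 2 = 34
}
```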
def getByteId(vreg: UInt, eidx: UInt, eew: UInt): UInt = {
Cat(getEgId(vreg, eidx, eew, false.B), (eidx << eew)(log2Ceil(dLenB)-1,0))
}
def eewByteMask(eew: UInt) = (0 until (1+log2Ceil(eLen/8))).map { e =>
Mux(e.U === eew, ((1 << (1 << e)) - 1).U, 0.U)
}.reduce(_|_)((eLen/8)-1,0)
def eewBitMask(eew: UInt) = FillInterleaved(8, eewByteMask(eew))
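For example, eew = 2 (32-bit elements) gives eewByteMask = (1 << (1 << 2)) - 1 = 0xF, i.e. four byte-lane enables, and eewBitMask widens that to 32 set data bits (assuming eLen = 64).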
def cqOlder(i0: UInt, i1: UInt, tail: UInt) = (i0 < i1) ^ (i0 < tail) ^ (i1 < tail)
def dLenSplat(in: UInt, eew: UInt) = {
val v = Wire(UInt(64.W))
v := in
Mux1H(UIntToOH(eew), (0 until 4).map { i => Fill(dLenB >> i, v((8<<i)-1,0)) })
}
def sextElem(in: UInt, in_eew: UInt): UInt = VecInit.tabulate(4)( { eew =>
Cat(in((8 << eew)-1), in((8 << eew)-1,0)).asSInt
})(in_eew)(64,0)
def extractElem(in: UInt, in_eew: UInt, eidx: UInt): UInt = {
val bytes = in.asTypeOf(Vec(dLenB, UInt(8.W)))
VecInit.tabulate(4) { eew =>
val elem = if (dLen == 64 && eew == 3) {
in
} else {
VecInit(bytes.grouped(1 << eew).map(g => VecInit(g).asUInt).toSeq)(eidx(log2Ceil(dLenB)-1-eew,0))
}
elem((8 << eew)-1,0)
}(in_eew)
}
def maxPosUInt(sew: Int) = Cat(0.U, ~(0.U(((8 << sew)-1).W)))
def minNegUInt(sew: Int) = Cat(1.U, 0.U(((8 << sew)-1).W))
def maxPosSInt(sew: Int) = ((1 << ((8 << sew)-1))-1).S
def minNegSInt(sew: Int) = (-1 << ((8 << sew)-1)).S
def maxPosFPUInt(sew: Int) = {
val expBits = Seq(4, 5, 8, 11)(sew)
val fracBits = (8 << sew) - expBits - 1
Cat(0.U, ~(0.U(expBits.W)), 0.U(fracBits.W))
}
def minNegFPUInt(sew: Int) = {
val expBits = Seq(4, 5, 8, 11)(sew)
val fracBits = (8 << sew) - expBits - 1
Cat(1.U, ~(0.U(expBits.W)), 0.U(fracBits.W))
}
def get_arch_mask(reg: UInt, emul: UInt) = VecInit.tabulate(4)({ lmul =>
FillInterleaved(1 << lmul, UIntToOH(reg >> lmul)((32>>lmul)-1,0))
})(emul)
def log2_up(f: UInt, max: Int) = VecInit.tabulate(max)({nf => log2Ceil(nf+1).U})(f)
def hazardMultiply(mask: UInt): UInt = if (vParams.hazardingMultiplier == 0) { mask } else {
require((1 << vParams.hazardingMultiplier) <= egsTotal)
VecInit(mask.asBools.grouped(1 << vParams.hazardingMultiplier).map { g =>
Fill(1 << vParams.hazardingMultiplier, g.orR)
}.toSeq).asUInt
}
}
File PipeSequencer.scala:
package saturn.common
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import freechips.rocketchip.tile.{CoreModule}
import saturn.common._
abstract class PipeSequencer[T <: Data](issType: T)(implicit p: Parameters) extends CoreModule()(p) with HasVectorParams {
val io = IO(new Bundle {
val dis = Flipped(Decoupled(new BackendIssueInst))
val dis_stall = Input(Bool()) // used to disable OOO
val seq_hazard = Output(Valid(new SequencerHazard))
val vat = Output(UInt(vParams.vatSz.W))
val vat_head = Input(UInt(vParams.vatSz.W))
val older_writes = Input(UInt(egsTotal.W))
val older_reads = Input(UInt(egsTotal.W))
val busy = Output(Bool())
val head = Output(Bool())
val rvs1 = new VectorReadIO
val rvs2 = new VectorReadIO
val rvd = new VectorReadIO
val rvm = new VectorReadIO
val perm = new Bundle {
val req = Decoupled(new CompactorReq(dLenB))
val data = Input(UInt(dLen.W))
}
val iss = Decoupled(issType)
val acc = Input(Valid(new VectorWrite(dLen)))
})
def accepts(inst: VectorIssueInst): Bool
def min(a: UInt, b: UInt) = Mux(a > b, b, a)
def get_max_offset(offset: UInt): UInt = min(offset, maxVLMax.U)(log2Ceil(maxVLMax),0)
def get_head_mask(bit_mask: UInt, eidx: UInt, eew: UInt) = bit_mask << (eidx << eew)(dLenOffBits-1,0)
def get_tail_mask(bit_mask: UInt, eidx: UInt, eew: UInt) = bit_mask >> (0.U(dLenOffBits.W) - (eidx << eew)(dLenOffBits-1,0))
def get_vm_mask(mask_resp: UInt, eidx: UInt, eew: UInt) = {
val vm_off = ((1 << dLenOffBits) - 1).U(log2Ceil(dLen).W)
val vm_eidx = (eidx & ~(vm_off >> eew))(log2Ceil(dLen)-1,0)
val vm_resp = (mask_resp >> vm_eidx)(dLenB-1,0)
Mux1H(UIntToOH(eew), (0 until 4).map { w => FillInterleaved(1 << w, vm_resp) })
}
def get_next_eidx(vl: UInt, eidx: UInt, eew: UInt, sub_dlen: UInt, reads_mask: Bool, elementwise: Bool) = {
val next = Wire(UInt((1+log2Ceil(maxVLMax)).W))
next := Mux(elementwise, eidx +& 1.U, Mux(reads_mask,
eidx +& dLen.U,
(((eidx >> (dLenOffBits.U - eew - sub_dlen)) +& 1.U) << (dLenOffBits.U - eew - sub_dlen))
))
min(vl, next)
}
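get_next_eidx advances the element index to the start of the next dLen-wide beat (or by a single element in elementwise mode) and clamps the result to vl. A worked example, assuming dLen = 64 (dLenOffBits = 3), eew = 2 (32-bit elements) and sub_dlen = 0: next = ((eidx >> (3 - 2)) + 1) << (3 - 2), so eidx steps 0 -> 2 -> 4 -> ... with two elements per beat, and with vl = 5 the final step yields min(5, 6) = 5, which downstream sequencers compare against vl to detect the tail beat.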
def next_is_new_eg(eidx: UInt, next_eidx: UInt, eew: UInt, masked: Bool) = {
val offset = Mux(masked, log2Ceil(dLen).U, dLenOffBits.U - eew)
(next_eidx >> offset) =/= (eidx >> offset)
}
io.rvs1.req.valid := false.B
io.rvs1.req.bits := DontCare
io.rvs2.req.valid := false.B
io.rvs2.req.bits := DontCare
io.rvd.req.valid := false.B
io.rvd.req.bits := DontCare
io.rvm.req.valid := false.B
io.rvm.req.bits := DontCare
io.perm.req.valid := false.B
io.perm.req.bits := DontCare
}
| module PermuteSequencer( // @[PermuteSequencer.scala:9:7]
input clock, // @[PermuteSequencer.scala:9:7]
input reset, // @[PermuteSequencer.scala:9:7]
output io_dis_ready, // @[PipeSequencer.scala:11:14]
input io_dis_valid, // @[PipeSequencer.scala:11:14]
input [31:0] io_dis_bits_bits, // @[PipeSequencer.scala:11:14]
input [6:0] io_dis_bits_vconfig_vl, // @[PipeSequencer.scala:11:14]
input [2:0] io_dis_bits_vconfig_vtype_vsew, // @[PipeSequencer.scala:11:14]
input io_dis_bits_vconfig_vtype_vlmul_sign, // @[PipeSequencer.scala:11:14]
input [1:0] io_dis_bits_vconfig_vtype_vlmul_mag, // @[PipeSequencer.scala:11:14]
input [5:0] io_dis_bits_vstart, // @[PipeSequencer.scala:11:14]
input [63:0] io_dis_bits_rs1_data, // @[PipeSequencer.scala:11:14]
input [2:0] io_dis_bits_vat, // @[PipeSequencer.scala:11:14]
input [1:0] io_dis_bits_emul, // @[PipeSequencer.scala:11:14]
input io_dis_bits_rs1_is_rs2, // @[PipeSequencer.scala:11:14]
input io_dis_bits_renv2, // @[PipeSequencer.scala:11:14]
input io_dis_bits_renvm, // @[PipeSequencer.scala:11:14]
output io_seq_hazard_valid, // @[PipeSequencer.scala:11:14]
output [2:0] io_seq_hazard_bits_vat, // @[PipeSequencer.scala:11:14]
output [31:0] io_seq_hazard_bits_rintent, // @[PipeSequencer.scala:11:14]
output [2:0] io_vat, // @[PipeSequencer.scala:11:14]
input [2:0] io_vat_head, // @[PipeSequencer.scala:11:14]
input [31:0] io_older_writes, // @[PipeSequencer.scala:11:14]
output io_busy, // @[PipeSequencer.scala:11:14]
input io_rvs2_req_ready, // @[PipeSequencer.scala:11:14]
output io_rvs2_req_valid, // @[PipeSequencer.scala:11:14]
output [4:0] io_rvs2_req_bits_eg, // @[PipeSequencer.scala:11:14]
output io_rvs2_req_bits_oldest, // @[PipeSequencer.scala:11:14]
input [63:0] io_rvs2_resp, // @[PipeSequencer.scala:11:14]
input io_rvm_req_ready, // @[PipeSequencer.scala:11:14]
output io_rvm_req_valid, // @[PipeSequencer.scala:11:14]
output io_rvm_req_bits_oldest, // @[PipeSequencer.scala:11:14]
input [63:0] io_rvm_resp, // @[PipeSequencer.scala:11:14]
input io_iss_ready, // @[PipeSequencer.scala:11:14]
output io_iss_valid, // @[PipeSequencer.scala:11:14]
output io_iss_bits_renv2, // @[PipeSequencer.scala:11:14]
output io_iss_bits_renvm, // @[PipeSequencer.scala:11:14]
output [63:0] io_iss_bits_rvs2_data, // @[PipeSequencer.scala:11:14]
output [5:0] io_iss_bits_eidx, // @[PipeSequencer.scala:11:14]
output [1:0] io_iss_bits_rvs2_eew, // @[PipeSequencer.scala:11:14]
output [63:0] io_iss_bits_rvm_data, // @[PipeSequencer.scala:11:14]
output io_iss_bits_vmu, // @[PipeSequencer.scala:11:14]
output [6:0] io_iss_bits_vl, // @[PipeSequencer.scala:11:14]
output io_iss_bits_tail // @[PipeSequencer.scala:11:14]
);
wire io_iss_valid_0; // @[PermuteSequencer.scala:88:{25,41,73}]
wire [4:0] _io_rvs2_req_bits_eg_T; // @[Parameters.scala:344:10]
reg valid; // @[PermuteSequencer.scala:17:22]
reg [31:0] inst_bits; // @[PermuteSequencer.scala:18:18]
reg [6:0] inst_vconfig_vl; // @[PermuteSequencer.scala:18:18]
reg [2:0] inst_vconfig_vtype_vsew; // @[PermuteSequencer.scala:18:18]
reg inst_vconfig_vtype_vlmul_sign; // @[PermuteSequencer.scala:18:18]
reg [1:0] inst_vconfig_vtype_vlmul_mag; // @[PermuteSequencer.scala:18:18]
reg [2:0] inst_vat; // @[PermuteSequencer.scala:18:18]
reg inst_rs1_is_rs2; // @[PermuteSequencer.scala:18:18]
reg inst_renv2; // @[PermuteSequencer.scala:18:18]
reg inst_renvm; // @[PermuteSequencer.scala:18:18]
reg [5:0] eidx; // @[PermuteSequencer.scala:19:18]
reg [31:0] rvs2_mask; // @[PermuteSequencer.scala:20:22]
reg rvm_mask; // @[PermuteSequencer.scala:21:21]
reg [6:0] slide_offset; // @[PermuteSequencer.scala:23:25]
wire [2:0] incr_eew = inst_bits[6:0] == 7'h7 | inst_bits[6:0] == 7'h27 ? {1'h0, inst_bits[13:12]} : ~(|(inst_bits[14:12])) & (~(|(inst_bits[14:12])) | inst_bits[14:12] == 3'h3 | inst_bits[14:12] == 3'h4 ? {1'h0, inst_bits[31:26]} : 7'h40) == 7'hE ? 3'h1 : inst_vconfig_vtype_vsew; // @[Bundles.scala:56:20, :58:26, :72:20, :75:20, :84:{18,35}]
wire [6:0] _eff_vl_T_9 = 7'h40 >> {1'h0, inst_vconfig_vtype_vsew} + {1'h0, inst_vconfig_vtype_vlmul_sign, ~inst_vconfig_vtype_vlmul_mag}; // @[PermuteSequencer.scala:18:18]
wire [6:0] _eff_vl_T_10 = inst_vconfig_vl + slide_offset; // @[PermuteSequencer.scala:18:18, :23:25, :34:97]
wire [6:0] eff_vl = ~(inst_bits[6:0] == 7'h7 | inst_bits[6:0] == 7'h27) & (|(inst_bits[14:12])) ? (inst_bits[26] ? (_eff_vl_T_9 > _eff_vl_T_10 ? _eff_vl_T_10 : _eff_vl_T_9) : inst_vconfig_vl - slide_offset) : inst_vconfig_vl; // @[Bundles.scala:56:20, :72:20, :75:20]
wire [2:0] _offset_T_13 = 3'h3 - incr_eew; // @[PipeSequencer.scala:54:33]
wire [5:0] _GEN = {3'h0, _offset_T_13}; // @[PipeSequencer.scala:54:{15,33}]
wire [13:0] _next_eidx_next_T_14 = {7'h0, {1'h0, eidx >> _GEN} + 7'h1} << _offset_T_13; // @[PipeSequencer.scala:54:{15,33,52,60}]
wire [6:0] next_eidx = eff_vl > _next_eidx_next_T_14[6:0] ? _next_eidx_next_T_14[6:0] : eff_vl; // @[PipeSequencer.scala:40:{34,37}, :52:10, :54:60]
wire tail = next_eidx == eff_vl; // @[PipeSequencer.scala:40:34]
wire _io_dis_ready_T_1 = io_iss_ready & io_iss_valid_0; // @[Decoupled.scala:51:35]
wire io_dis_ready_0 = ~valid | tail & _io_dis_ready_T_1; // @[Decoupled.scala:51:35]
wire [31:0] vs2_read_oh = inst_renv2 ? 32'h1 << _io_rvs2_req_bits_eg_T : 32'h0; // @[OneHot.scala:58:35]
wire oldest = inst_vat == io_vat_head; // @[PermuteSequencer.scala:18:18, :79:25]
wire [5:0] io_rvs2_req_bits_eg_off = eidx >> _GEN; // @[Parameters.scala:343:20]
assign _io_rvs2_req_bits_eg_T = (inst_rs1_is_rs2 ? inst_bits[19:15] : inst_bits[24:20]) + io_rvs2_req_bits_eg_off[4:0]; // @[Parameters.scala:343:20, :344:10]
assign io_iss_valid_0 = valid & (({31'h0, inst_renvm} | vs2_read_oh) & io_older_writes) == 32'h0 & (~inst_renvm | io_rvm_req_ready) & (~inst_renv2 | io_rvs2_req_ready); // @[PermuteSequencer.scala:17:22, :18:18, :73:24, :74:24, :76:{33,48,67}, :88:{25,41,45,52,73,77,84}]
wire _GEN_0 = io_dis_ready_0 & io_dis_valid; // @[Decoupled.scala:51:35]
wire [63:0] _offset_T_9 = io_dis_bits_bits[14] ? io_dis_bits_rs1_data : {59'h0, io_dis_bits_bits[19:15]}; // @[Bundles.scala:72:20, :73:18]
wire [6:0] offset = io_dis_bits_bits[14:12] == 3'h0 | io_dis_bits_bits[14:12] == 3'h3 | io_dis_bits_bits[14:12] == 3'h4 ? (_offset_T_9 > 64'h40 ? 7'h40 : _offset_T_9[6:0]) : 7'h1; // @[Bundles.scala:72:20]
wire slide_1 = ~(io_dis_bits_bits[6:0] == 7'h7 | io_dis_bits_bits[6:0] == 7'h27) & (|(io_dis_bits_bits[14:12])); // @[Bundles.scala:56:20, :72:20]
wire _GEN_1 = _io_dis_ready_T_1 & next_eidx != eff_vl; // @[Decoupled.scala:51:35]
wire [4:0] rs2_1 = io_dis_bits_rs1_is_rs2 ? io_dis_bits_bits[19:15] : io_dis_bits_bits[24:20]; // @[Bundles.scala:69:17, :73:18]
wire [3:0][31:0] _GEN_2 = {{{{8{&(rs2_1[4:3])}}, {8{rs2_1[4:3] == 2'h2}}, {8{rs2_1[4:3] == 2'h1}}, {8{rs2_1[4:3] == 2'h0}}}}, {{{4{&(rs2_1[4:2])}}, {4{rs2_1[4:2] == 3'h6}}, {4{rs2_1[4:2] == 3'h5}}, {4{rs2_1[4:2] == 3'h4}}, {4{rs2_1[4:2] == 3'h3}}, {4{rs2_1[4:2] == 3'h2}}, {4{rs2_1[4:2] == 3'h1}}, {4{rs2_1[4:2] == 3'h0}}}}, {{{2{&(rs2_1[4:1])}}, {2{rs2_1[4:1] == 4'hE}}, {2{rs2_1[4:1] == 4'hD}}, {2{rs2_1[4:1] == 4'hC}}, {2{rs2_1[4:1] == 4'hB}}, {2{rs2_1[4:1] == 4'hA}}, {2{rs2_1[4:1] == 4'h9}}, {2{rs2_1[4:1] == 4'h8}}, {2{rs2_1[4:1] == 4'h7}}, {2{rs2_1[4:1] == 4'h6}}, {2{rs2_1[4:1] == 4'h5}}, {2{rs2_1[4:1] == 4'h4}}, {2{rs2_1[4:1] == 4'h3}}, {2{rs2_1[4:1] == 4'h2}}, {2{rs2_1[4:1] == 4'h1}}, {2{rs2_1[4:1] == 4'h0}}}}, {32'h1 << rs2_1}}; // @[OneHot.scala:58:35]
always @(posedge clock) begin // @[PermuteSequencer.scala:9:7]
if (reset) // @[PermuteSequencer.scala:9:7]
valid <= 1'h0; // @[PermuteSequencer.scala:17:22]
else if (_GEN_0) // @[Decoupled.scala:51:35]
valid <= ~slide_1 | ~(io_dis_bits_bits[26] ? offset >= 7'h40 >> {1'h0, io_dis_bits_vconfig_vtype_vsew} + {1'h0, io_dis_bits_vconfig_vtype_vlmul_sign, ~io_dis_bits_vconfig_vtype_vlmul_mag} : io_dis_bits_vconfig_vl <= offset); // @[Bundles.scala:75:20]
else if (_io_dis_ready_T_1) // @[Decoupled.scala:51:35]
valid <= next_eidx != eff_vl; // @[PipeSequencer.scala:40:34]
if (_GEN_0) begin // @[Decoupled.scala:51:35]
inst_bits <= io_dis_bits_bits; // @[PermuteSequencer.scala:18:18]
inst_vconfig_vl <= io_dis_bits_vconfig_vl; // @[PermuteSequencer.scala:18:18]
inst_vconfig_vtype_vsew <= io_dis_bits_vconfig_vtype_vsew; // @[PermuteSequencer.scala:18:18]
inst_vconfig_vtype_vlmul_sign <= io_dis_bits_vconfig_vtype_vlmul_sign; // @[PermuteSequencer.scala:18:18]
inst_vconfig_vtype_vlmul_mag <= io_dis_bits_vconfig_vtype_vlmul_mag; // @[PermuteSequencer.scala:18:18]
inst_vat <= io_dis_bits_vat; // @[PermuteSequencer.scala:18:18]
inst_rs1_is_rs2 <= io_dis_bits_rs1_is_rs2; // @[PermuteSequencer.scala:18:18]
inst_renv2 <= io_dis_bits_renv2; // @[PermuteSequencer.scala:18:18]
inst_renvm <= io_dis_bits_renvm; // @[PermuteSequencer.scala:18:18]
slide_offset <= offset; // @[PermuteSequencer.scala:23:25, :44:21]
end
if (_GEN_1) // @[PermuteSequencer.scala:99:21]
eidx <= next_eidx[5:0]; // @[PipeSequencer.scala:40:34]
else if (_GEN_0) // @[Decoupled.scala:51:35]
eidx <= slide_1 ? (io_dis_bits_bits[26] ? offset[5:0] : 6'h0) : io_dis_bits_vstart; // @[Bundles.scala:75:20]
if (~_GEN_1 | next_eidx >> _offset_T_13 == {1'h0, eidx >> _GEN}) begin // @[PipeSequencer.scala:40:34, :54:{15,33}, :60:{16,27,37}]
if (_GEN_0) // @[Decoupled.scala:51:35]
rvs2_mask <= io_dis_bits_renv2 ? _GEN_2[io_dis_bits_emul] : 32'h0; // @[PermuteSequencer.scala:20:22, :59:{21,53}]
end
else // @[PermuteSequencer.scala:42:22, :99:31, :100:91]
rvs2_mask <= rvs2_mask & ~vs2_read_oh; // @[PermuteSequencer.scala:20:22, :73:24, :101:{30,32}]
rvm_mask <= ~(_GEN_1 & next_eidx[6]) & (_GEN_0 ? io_dis_bits_renvm : rvm_mask); // @[Decoupled.scala:51:35]
always @(posedge) |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
package constellation.channel
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy._
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.util._
import constellation.noc.{HasNoCParams}
class NoCMonitor(val cParam: ChannelParams)(implicit val p: Parameters) extends Module with HasNoCParams {
val io = IO(new Bundle {
val in = Input(new Channel(cParam))
})
val in_flight = RegInit(VecInit(Seq.fill(cParam.nVirtualChannels) { false.B }))
for (i <- 0 until cParam.srcSpeedup) {
val flit = io.in.flit(i)
when (flit.valid) {
when (flit.bits.head) {
in_flight(flit.bits.virt_channel_id) := true.B
assert (!in_flight(flit.bits.virt_channel_id), "Flit head/tail sequencing is broken")
}
when (flit.bits.tail) {
in_flight(flit.bits.virt_channel_id) := false.B
}
}
val possibleFlows = cParam.possibleFlows
when (flit.valid && flit.bits.head) {
cParam match {
case n: ChannelParams => n.virtualChannelParams.zipWithIndex.foreach { case (v,i) =>
assert(flit.bits.virt_channel_id =/= i.U || v.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR)
}
case _ => assert(cParam.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR)
}
}
}
}
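As a concrete illustration of the assertion above: a head flit arriving on virt_channel_id = 2 sets in_flight(2); if another head flit arrives on VC 2 before a tail flit clears the entry, in_flight(2) is still set when the new head is checked, so the "Flit head/tail sequencing is broken" assertion fires. A single-flit packet (head and tail in the same cycle) is fine, because the later tail assignment wins and leaves the entry clear.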
File Types.scala:
package constellation.routing
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Parameters}
import constellation.noc.{HasNoCParams}
import constellation.channel.{Flit}
/** A representation for 1 specific virtual channel in wormhole routing
*
* @param src the source node
* @param vc ID for the virtual channel
* @param dst the destination node
* @param n_vc the number of virtual channels
*/
// BEGIN: ChannelRoutingInfo
case class ChannelRoutingInfo(
src: Int,
dst: Int,
vc: Int,
n_vc: Int
) {
// END: ChannelRoutingInfo
require (src >= -1 && dst >= -1 && vc >= 0, s"Illegal $this")
require (!(src == -1 && dst == -1), s"Illegal $this")
require (vc < n_vc, s"Illegal $this")
val isIngress = src == -1
val isEgress = dst == -1
}
/** Represents the properties of a packet that are relevant for routing
* ingressId and egressId uniquely identify a flow, but vnet and dst are used here
* to simplify the implementation of routing relations
*
* @param ingressId packet's source ingress point
* @param egressId packet's destination egress point
* @param vNet virtual subnetwork identifier
* @param dst packet's destination node ID
*/
// BEGIN: FlowRoutingInfo
case class FlowRoutingInfo(
ingressId: Int,
egressId: Int,
vNetId: Int,
ingressNode: Int,
ingressNodeId: Int,
egressNode: Int,
egressNodeId: Int,
fifo: Boolean
) {
// END: FlowRoutingInfo
def isFlow(f: FlowRoutingBundle): Bool = {
(f.ingress_node === ingressNode.U &&
f.egress_node === egressNode.U &&
f.ingress_node_id === ingressNodeId.U &&
f.egress_node_id === egressNodeId.U)
}
def asLiteral(b: FlowRoutingBundle): BigInt = {
Seq(
(vNetId , b.vnet_id),
(ingressNode , b.ingress_node),
(ingressNodeId , b.ingress_node_id),
(egressNode , b.egress_node),
(egressNodeId , b.egress_node_id)
).foldLeft(0)((l, t) => {
(l << t._2.getWidth) | t._1
})
}
}
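Both routing case classes are plain Scala values fixed at elaboration time; only isFlow generates hardware (a comparison of the FlowRoutingBundle fields against the literal flow). A small construction sketch (the values are illustrative):

```scala
import constellation.routing.{ChannelRoutingInfo, FlowRoutingInfo}

// An ingress channel into node 3, virtual channel 0 of 4 (src = -1 marks an ingress).
val ingressVC = ChannelRoutingInfo(src = -1, dst = 3, vc = 0, n_vc = 4)
// ingressVC.isIngress == true, ingressVC.isEgress == false

// A flow from ingress 0 (offset 0 at node 1) to egress 2 (offset 1 at node 3) on vnet 0.
val flow = FlowRoutingInfo(
  ingressId = 0, egressId = 2, vNetId = 0,
  ingressNode = 1, ingressNodeId = 0,
  egressNode = 3, egressNodeId = 1,
  fifo = false
)
```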
class FlowRoutingBundle(implicit val p: Parameters) extends Bundle with HasNoCParams {
// Instead of tracking ingress/egress ID, track the physical destination id and the offset at the destination
// This simplifies the routing tables
val vnet_id = UInt(log2Ceil(nVirtualNetworks).W)
val ingress_node = UInt(log2Ceil(nNodes).W)
val ingress_node_id = UInt(log2Ceil(maxIngressesAtNode).W)
val egress_node = UInt(log2Ceil(nNodes).W)
val egress_node_id = UInt(log2Ceil(maxEgressesAtNode).W)
}
| module NoCMonitor_96( // @[Monitor.scala:11:7]
input clock, // @[Monitor.scala:11:7]
input reset, // @[Monitor.scala:11:7]
input io_in_flit_0_valid, // @[Monitor.scala:12:14]
input io_in_flit_0_bits_head, // @[Monitor.scala:12:14]
input io_in_flit_0_bits_tail, // @[Monitor.scala:12:14]
input [4:0] io_in_flit_0_bits_flow_ingress_node, // @[Monitor.scala:12:14]
input [1:0] io_in_flit_0_bits_flow_ingress_node_id, // @[Monitor.scala:12:14]
input [4:0] io_in_flit_0_bits_flow_egress_node, // @[Monitor.scala:12:14]
input [1:0] io_in_flit_0_bits_flow_egress_node_id, // @[Monitor.scala:12:14]
input [2:0] io_in_flit_0_bits_virt_channel_id // @[Monitor.scala:12:14]
);
reg in_flight_0; // @[Monitor.scala:16:26]
reg in_flight_1; // @[Monitor.scala:16:26]
reg in_flight_2; // @[Monitor.scala:16:26]
reg in_flight_3; // @[Monitor.scala:16:26]
reg in_flight_4; // @[Monitor.scala:16:26]
wire _GEN = io_in_flit_0_bits_virt_channel_id == 3'h0; // @[Monitor.scala:21:46]
wire _GEN_0 = io_in_flit_0_bits_virt_channel_id == 3'h1; // @[Monitor.scala:21:46]
wire _GEN_1 = io_in_flit_0_bits_virt_channel_id == 3'h2; // @[Monitor.scala:21:46]
wire _GEN_2 = io_in_flit_0_bits_virt_channel_id == 3'h3; // @[Monitor.scala:21:46] |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
package constellation.channel
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy._
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.util._
import constellation.noc.{HasNoCParams}
class NoCMonitor(val cParam: ChannelParams)(implicit val p: Parameters) extends Module with HasNoCParams {
val io = IO(new Bundle {
val in = Input(new Channel(cParam))
})
val in_flight = RegInit(VecInit(Seq.fill(cParam.nVirtualChannels) { false.B }))
for (i <- 0 until cParam.srcSpeedup) {
val flit = io.in.flit(i)
when (flit.valid) {
when (flit.bits.head) {
in_flight(flit.bits.virt_channel_id) := true.B
assert (!in_flight(flit.bits.virt_channel_id), "Flit head/tail sequencing is broken")
}
when (flit.bits.tail) {
in_flight(flit.bits.virt_channel_id) := false.B
}
}
val possibleFlows = cParam.possibleFlows
when (flit.valid && flit.bits.head) {
cParam match {
case n: ChannelParams => n.virtualChannelParams.zipWithIndex.foreach { case (v,i) =>
assert(flit.bits.virt_channel_id =/= i.U || v.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR)
}
case _ => assert(cParam.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR)
}
}
}
}
File Types.scala:
package constellation.routing
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Parameters}
import constellation.noc.{HasNoCParams}
import constellation.channel.{Flit}
/** A representation for 1 specific virtual channel in wormhole routing
*
* @param src the source node
* @param vc ID for the virtual channel
* @param dst the destination node
* @param n_vc the number of virtual channels
*/
// BEGIN: ChannelRoutingInfo
case class ChannelRoutingInfo(
src: Int,
dst: Int,
vc: Int,
n_vc: Int
) {
// END: ChannelRoutingInfo
require (src >= -1 && dst >= -1 && vc >= 0, s"Illegal $this")
require (!(src == -1 && dst == -1), s"Illegal $this")
require (vc < n_vc, s"Illegal $this")
val isIngress = src == -1
val isEgress = dst == -1
}
/** Represents the properties of a packet that are relevant for routing
* ingressId and egressId uniquely identify a flow, but vnet and dst are used here
* to simplify the implementation of routing relations
*
* @param ingressId packet's source ingress point
* @param egressId packet's destination egress point
* @param vNet virtual subnetwork identifier
* @param dst packet's destination node ID
*/
// BEGIN: FlowRoutingInfo
case class FlowRoutingInfo(
ingressId: Int,
egressId: Int,
vNetId: Int,
ingressNode: Int,
ingressNodeId: Int,
egressNode: Int,
egressNodeId: Int,
fifo: Boolean
) {
// END: FlowRoutingInfo
def isFlow(f: FlowRoutingBundle): Bool = {
(f.ingress_node === ingressNode.U &&
f.egress_node === egressNode.U &&
f.ingress_node_id === ingressNodeId.U &&
f.egress_node_id === egressNodeId.U)
}
def asLiteral(b: FlowRoutingBundle): BigInt = {
Seq(
(vNetId , b.vnet_id),
(ingressNode , b.ingress_node),
(ingressNodeId , b.ingress_node_id),
(egressNode , b.egress_node),
(egressNodeId , b.egress_node_id)
).foldLeft(0)((l, t) => {
(l << t._2.getWidth) | t._1
})
}
}
class FlowRoutingBundle(implicit val p: Parameters) extends Bundle with HasNoCParams {
// Instead of tracking ingress/egress ID, track the physical destination id and the offset at the destination
// This simplifies the routing tables
val vnet_id = UInt(log2Ceil(nVirtualNetworks).W)
val ingress_node = UInt(log2Ceil(nNodes).W)
val ingress_node_id = UInt(log2Ceil(maxIngressesAtNode).W)
val egress_node = UInt(log2Ceil(nNodes).W)
val egress_node_id = UInt(log2Ceil(maxEgressesAtNode).W)
}
| module NoCMonitor_55( // @[Monitor.scala:11:7]
input clock, // @[Monitor.scala:11:7]
input reset, // @[Monitor.scala:11:7]
input io_in_flit_0_valid, // @[Monitor.scala:12:14]
input io_in_flit_0_bits_head, // @[Monitor.scala:12:14]
input io_in_flit_0_bits_tail, // @[Monitor.scala:12:14]
input [3:0] io_in_flit_0_bits_flow_ingress_node, // @[Monitor.scala:12:14]
input [1:0] io_in_flit_0_bits_flow_ingress_node_id, // @[Monitor.scala:12:14]
input [3:0] io_in_flit_0_bits_flow_egress_node, // @[Monitor.scala:12:14]
input [1:0] io_in_flit_0_bits_flow_egress_node_id, // @[Monitor.scala:12:14]
input io_in_flit_0_bits_virt_channel_id // @[Monitor.scala:12:14]
);
reg in_flight_0; // @[Monitor.scala:16:26]
reg in_flight_1; // @[Monitor.scala:16:26] |
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
* @param width Data width of operands
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
File recFNFromFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
object recFNFromFN
{
def apply(expWidth: Int, sigWidth: Int, in: Bits) =
{
val rawIn = rawFloatFromFN(expWidth, sigWidth, in)
rawIn.sign ##
(Mux(rawIn.isZero, 0.U(3.W), rawIn.sExp(expWidth, expWidth - 2)) |
Mux(rawIn.isNaN, 1.U, 0.U)) ##
rawIn.sExp(expWidth - 3, 0) ##
rawIn.sig(sigWidth - 2, 0)
}
}
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
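The bias follows the IEEE-754 convention: Float(expWidth = 8, sigWidth = 24) (binary32) gives bias = (1 << 7) - 1 = 127, and Float(expWidth = 5, sigWidth = 11) (binary16) gives bias = 15.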
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds away from 0
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
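The increment term corresponds to the round-to-nearest, ties-to-even (rne) row of the linked vxrm table: the bit just below the cut point (point_five), the OR of the remaining discarded bits (zeros), and the shifted result's LSB (ones_digit) together decide whether to add one. A plain-Scala model of the same equation (a sketch; bit widths are ignored):

```scala
// Software model of the rounding shift above (not Chisel).
def roundingShift(x: Int, u: Int): Int = {
  val pointFive = if (u == 0) 0 else (x >> (u - 1)) & 1
  val zeros     = u > 1 && (x & ((1 << (u - 1)) - 1)) != 0
  val onesDigit = (x >> u) & 1
  val roundUp   = pointFive == 1 && (zeros || onesDigit == 1)
  (x >> u) + (if (roundUp) 1 else 0)
}

assert(roundingShift(11, 2) == 3) // 2.75 rounds up to 3
assert(roundingShift(10, 2) == 2) // 2.5 is a tie; quotient LSB is 0, so it stays at 2
assert(roundingShift( 6, 2) == 2) // 1.5 is a tie; rounds to the even value 2
```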
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
// Instantiate the hardfloat sqrt
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
// Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
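// Negate t by flipping its IEEE sign bit, then reuse the + operator above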
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
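// shift_fn is the standard-format encoding of 2^(-u): sign 0, biased exponent (bias - u), zero fraction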
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
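// Quiet comparison (signaling is false); gt is false whenever either operand is NaN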
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
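// Negative inputs (sign set and not a zero) are clamped to +0; all others pass through unchanged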
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
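// identity encodes IEEE +1.0 (sign 0, exponent == bias, zero fraction); minimum encodes -Infinity (sign 1, all-ones exponent, zero fraction)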
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
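As a quick illustration of how the typeclass above is meant to be consumed, here is a minimal, hypothetical sketch of a module that is generic over the element type. GenericMac is not part of the sources above; it only assumes the Arithmetic/ArithmeticOps definitions from Arithmetic.scala are in scope.
import chisel3._
// Hypothetical sketch: a combinational multiply-accumulate that works for any
// element type T with an Arithmetic[T] instance (SInt, Float, DummySInt, ...).
class GenericMac[T <: Data](t: T)(implicit ev: Arithmetic[T]) extends Module {
  import ev._ // brings the implicit conversion to ArithmeticOps into scope
  val io = IO(new Bundle {
    val a   = Input(t)
    val b   = Input(t)
    val c   = Input(t)
    val out = Output(t)
  })
  io.out := io.c.mac(io.a, io.b) // out = c + a * b
}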
File fNFromRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
object fNFromRecFN
{
def apply(expWidth: Int, sigWidth: Int, in: Bits) =
{
val minNormExp = (BigInt(1)<<(expWidth - 1)) + 2
val rawIn = rawFloatFromRecFN(expWidth, sigWidth, in)
val isSubnormal = rawIn.sExp < minNormExp.S
val denormShiftDist = 1.U - rawIn.sExp(log2Up(sigWidth - 1) - 1, 0)
val denormFract = ((rawIn.sig>>1)>>denormShiftDist)(sigWidth - 2, 0)
val expOut =
Mux(isSubnormal,
0.U,
rawIn.sExp(expWidth - 1, 0) -
((BigInt(1)<<(expWidth - 1)) + 1).U
) | Fill(expWidth, rawIn.isNaN || rawIn.isInf)
val fractOut =
Mux(isSubnormal,
denormFract,
Mux(rawIn.isInf, 0.U, rawIn.sig(sigWidth - 2, 0))
)
Cat(rawIn.sign, expOut, fractOut)
}
}
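For reference, a value in HardFloat's recoded format occupies expWidth + sigWidth + 1 bits (1 sign bit, expWidth + 1 exponent bits, and sigWidth - 1 significand bits), so an IEEE single-precision value (expWidth = 8, sigWidth = 24) recodes to 33 bits. This is why the recoded signals in the generated module below are declared as [32:0].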
File rawFloatFromFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
object rawFloatFromFN {
def apply(expWidth: Int, sigWidth: Int, in: Bits) = {
val sign = in(expWidth + sigWidth - 1)
val expIn = in(expWidth + sigWidth - 2, sigWidth - 1)
val fractIn = in(sigWidth - 2, 0)
val isZeroExpIn = (expIn === 0.U)
val isZeroFractIn = (fractIn === 0.U)
val normDist = countLeadingZeros(fractIn)
val subnormFract = (fractIn << normDist) (sigWidth - 3, 0) << 1
val adjustedExp =
Mux(isZeroExpIn,
normDist ^ ((BigInt(1) << (expWidth + 1)) - 1).U,
expIn
) + ((BigInt(1) << (expWidth - 1)).U
| Mux(isZeroExpIn, 2.U, 1.U))
val isZero = isZeroExpIn && isZeroFractIn
val isSpecial = adjustedExp(expWidth, expWidth - 1) === 3.U
val out = Wire(new RawFloat(expWidth, sigWidth))
out.isNaN := isSpecial && !isZeroFractIn
out.isInf := isSpecial && isZeroFractIn
out.isZero := isZero
out.sign := sign
out.sExp := adjustedExp(expWidth, 0).zext
out.sig :=
0.U(1.W) ## !isZero ## Mux(isZeroExpIn, subnormFract, fractIn)
out
}
}
File rawFloatFromRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
/*----------------------------------------------------------------------------
| In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be
| set.
*----------------------------------------------------------------------------*/
object rawFloatFromRecFN
{
def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat =
{
val exp = in(expWidth + sigWidth - 1, sigWidth - 1)
val isZero = exp(expWidth, expWidth - 2) === 0.U
val isSpecial = exp(expWidth, expWidth - 1) === 3.U
val out = Wire(new RawFloat(expWidth, sigWidth))
out.isNaN := isSpecial && exp(expWidth - 2)
out.isInf := isSpecial && ! exp(expWidth - 2)
out.isZero := isZero
out.sign := in(expWidth + sigWidth)
out.sExp := exp.zext
out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0)
out
}
}
| module PE_22( // @[PE.scala:31:7]
input clock, // @[PE.scala:31:7]
input reset, // @[PE.scala:31:7]
input [31:0] io_in_a_bits, // @[PE.scala:35:14]
input [31:0] io_in_b_bits, // @[PE.scala:35:14]
input [31:0] io_in_d_bits, // @[PE.scala:35:14]
output [31:0] io_out_a_bits, // @[PE.scala:35:14]
output [31:0] io_out_b_bits, // @[PE.scala:35:14]
output [31:0] io_out_c_bits, // @[PE.scala:35:14]
input io_in_control_dataflow, // @[PE.scala:35:14]
input io_in_control_propagate, // @[PE.scala:35:14]
input [4:0] io_in_control_shift, // @[PE.scala:35:14]
output io_out_control_dataflow, // @[PE.scala:35:14]
output io_out_control_propagate, // @[PE.scala:35:14]
output [4:0] io_out_control_shift, // @[PE.scala:35:14]
input [3:0] io_in_id, // @[PE.scala:35:14]
output [3:0] io_out_id, // @[PE.scala:35:14]
input io_in_last, // @[PE.scala:35:14]
output io_out_last, // @[PE.scala:35:14]
input io_in_valid, // @[PE.scala:35:14]
output io_out_valid, // @[PE.scala:35:14]
output io_bad_dataflow // @[PE.scala:35:14]
);
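// The Float operations from Arithmetic.scala are fully inlined after elaboration:
// each *_rec_rawIn_* group of wires is an unrolled rawFloatFromFN conversion, the
// priority-mux chains compute normDist (countLeadingZeros of the fraction), and
// the [32:0] signals carry values in the 33-bit recoded format.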
wire c2_self_rec_rawIn_isNaN; // @[rawFloatFromFN.scala:63:19]
wire io_out_c_self_rec_rawIn_3_isNaN; // @[rawFloatFromFN.scala:63:19]
wire io_out_c_shift_rec_rawIn_1_isNaN; // @[rawFloatFromFN.scala:63:19]
wire io_out_c_self_rec_rawIn_2_isNaN; // @[rawFloatFromFN.scala:63:19]
wire c1_self_rec_rawIn_isNaN; // @[rawFloatFromFN.scala:63:19]
wire io_out_c_self_rec_rawIn_1_isNaN; // @[rawFloatFromFN.scala:63:19]
wire io_out_c_shift_rec_rawIn_isNaN; // @[rawFloatFromFN.scala:63:19]
wire io_out_c_self_rec_rawIn_isNaN; // @[rawFloatFromFN.scala:63:19]
wire [32:0] _c2_resizer_io_out; // @[Arithmetic.scala:486:29]
wire [32:0] _io_out_c_resizer_1_io_out; // @[Arithmetic.scala:500:29]
wire [32:0] _io_out_c_muladder_1_io_out; // @[Arithmetic.scala:450:30]
wire [32:0] _c1_resizer_io_out; // @[Arithmetic.scala:486:29]
wire [32:0] _io_out_c_resizer_io_out; // @[Arithmetic.scala:500:29]
wire [32:0] _io_out_c_muladder_io_out; // @[Arithmetic.scala:450:30]
wire [31:0] _mac_unit_io_out_d_bits; // @[PE.scala:64:24]
wire [31:0] io_in_a_bits_0 = io_in_a_bits; // @[PE.scala:31:7]
wire [31:0] io_in_b_bits_0 = io_in_b_bits; // @[PE.scala:31:7]
wire [31:0] io_in_d_bits_0 = io_in_d_bits; // @[PE.scala:31:7]
wire io_in_control_dataflow_0 = io_in_control_dataflow; // @[PE.scala:31:7]
wire io_in_control_propagate_0 = io_in_control_propagate; // @[PE.scala:31:7]
wire [4:0] io_in_control_shift_0 = io_in_control_shift; // @[PE.scala:31:7]
wire [3:0] io_in_id_0 = io_in_id; // @[PE.scala:31:7]
wire io_in_last_0 = io_in_last; // @[PE.scala:31:7]
wire io_in_valid_0 = io_in_valid; // @[PE.scala:31:7]
wire _io_out_c_T_1 = reset; // @[Arithmetic.scala:447:15]
wire _io_out_c_T_5 = reset; // @[Arithmetic.scala:447:15]
wire io_bad_dataflow_0 = 1'h0; // @[PE.scala:31:7]
wire [31:0] io_out_a_bits_0 = io_in_a_bits_0; // @[PE.scala:31:7]
wire [31:0] _mac_unit_io_in_b_WIRE_1 = io_in_b_bits_0; // @[PE.scala:31:7, :106:37]
wire [31:0] _mac_unit_io_in_b_WIRE_3 = io_in_b_bits_0; // @[PE.scala:31:7, :113:37]
wire [31:0] _mac_unit_io_in_b_WIRE_9 = io_in_b_bits_0; // @[PE.scala:31:7, :137:35]
wire io_out_control_dataflow_0 = io_in_control_dataflow_0; // @[PE.scala:31:7]
wire io_out_control_propagate_0 = io_in_control_propagate_0; // @[PE.scala:31:7]
wire [4:0] io_out_control_shift_0 = io_in_control_shift_0; // @[PE.scala:31:7]
wire [3:0] io_out_id_0 = io_in_id_0; // @[PE.scala:31:7]
wire io_out_last_0 = io_in_last_0; // @[PE.scala:31:7]
wire io_out_valid_0 = io_in_valid_0; // @[PE.scala:31:7]
wire [31:0] io_out_b_bits_0; // @[PE.scala:31:7]
wire [31:0] io_out_c_bits_0; // @[PE.scala:31:7]
reg [31:0] c1_bits; // @[PE.scala:70:15]
wire [31:0] _mac_unit_io_in_b_WIRE_7 = c1_bits; // @[PE.scala:70:15, :127:38]
reg [31:0] c2_bits; // @[PE.scala:71:15]
wire [31:0] _mac_unit_io_in_b_WIRE_5 = c2_bits; // @[PE.scala:71:15, :121:38]
reg last_s; // @[PE.scala:89:25]
wire flip = last_s != io_in_control_propagate_0; // @[PE.scala:31:7, :89:25, :90:21]
wire [4:0] shift_offset = flip ? io_in_control_shift_0 : 5'h0; // @[PE.scala:31:7, :90:21, :91:25]
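// The shift amount is applied only on cycles where the propagate control differs from the registered last_s (i.e. when propagate toggles)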
wire io_out_c_self_rec_rawIn_sign = c1_bits[31]; // @[rawFloatFromFN.scala:44:18]
wire io_out_c_self_rec_rawIn_sign_0 = io_out_c_self_rec_rawIn_sign; // @[rawFloatFromFN.scala:44:18, :63:19]
wire [7:0] io_out_c_self_rec_rawIn_expIn = c1_bits[30:23]; // @[rawFloatFromFN.scala:45:19]
wire [22:0] io_out_c_self_rec_rawIn_fractIn = c1_bits[22:0]; // @[rawFloatFromFN.scala:46:21]
wire io_out_c_self_rec_rawIn_isZeroExpIn = io_out_c_self_rec_rawIn_expIn == 8'h0; // @[rawFloatFromFN.scala:45:19, :48:30]
wire io_out_c_self_rec_rawIn_isZeroFractIn = io_out_c_self_rec_rawIn_fractIn == 23'h0; // @[rawFloatFromFN.scala:46:21, :49:34]
wire _io_out_c_self_rec_rawIn_normDist_T = io_out_c_self_rec_rawIn_fractIn[0]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_1 = io_out_c_self_rec_rawIn_fractIn[1]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_2 = io_out_c_self_rec_rawIn_fractIn[2]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_3 = io_out_c_self_rec_rawIn_fractIn[3]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_4 = io_out_c_self_rec_rawIn_fractIn[4]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_5 = io_out_c_self_rec_rawIn_fractIn[5]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_6 = io_out_c_self_rec_rawIn_fractIn[6]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_7 = io_out_c_self_rec_rawIn_fractIn[7]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_8 = io_out_c_self_rec_rawIn_fractIn[8]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_9 = io_out_c_self_rec_rawIn_fractIn[9]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_10 = io_out_c_self_rec_rawIn_fractIn[10]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_11 = io_out_c_self_rec_rawIn_fractIn[11]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_12 = io_out_c_self_rec_rawIn_fractIn[12]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_13 = io_out_c_self_rec_rawIn_fractIn[13]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_14 = io_out_c_self_rec_rawIn_fractIn[14]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_15 = io_out_c_self_rec_rawIn_fractIn[15]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_16 = io_out_c_self_rec_rawIn_fractIn[16]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_17 = io_out_c_self_rec_rawIn_fractIn[17]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_18 = io_out_c_self_rec_rawIn_fractIn[18]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_19 = io_out_c_self_rec_rawIn_fractIn[19]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_20 = io_out_c_self_rec_rawIn_fractIn[20]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_21 = io_out_c_self_rec_rawIn_fractIn[21]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_22 = io_out_c_self_rec_rawIn_fractIn[22]; // @[rawFloatFromFN.scala:46:21]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_23 = _io_out_c_self_rec_rawIn_normDist_T_1 ? 5'h15 : 5'h16; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_24 = _io_out_c_self_rec_rawIn_normDist_T_2 ? 5'h14 : _io_out_c_self_rec_rawIn_normDist_T_23; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_25 = _io_out_c_self_rec_rawIn_normDist_T_3 ? 5'h13 : _io_out_c_self_rec_rawIn_normDist_T_24; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_26 = _io_out_c_self_rec_rawIn_normDist_T_4 ? 5'h12 : _io_out_c_self_rec_rawIn_normDist_T_25; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_27 = _io_out_c_self_rec_rawIn_normDist_T_5 ? 5'h11 : _io_out_c_self_rec_rawIn_normDist_T_26; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_28 = _io_out_c_self_rec_rawIn_normDist_T_6 ? 5'h10 : _io_out_c_self_rec_rawIn_normDist_T_27; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_29 = _io_out_c_self_rec_rawIn_normDist_T_7 ? 5'hF : _io_out_c_self_rec_rawIn_normDist_T_28; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_30 = _io_out_c_self_rec_rawIn_normDist_T_8 ? 5'hE : _io_out_c_self_rec_rawIn_normDist_T_29; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_31 = _io_out_c_self_rec_rawIn_normDist_T_9 ? 5'hD : _io_out_c_self_rec_rawIn_normDist_T_30; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_32 = _io_out_c_self_rec_rawIn_normDist_T_10 ? 5'hC : _io_out_c_self_rec_rawIn_normDist_T_31; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_33 = _io_out_c_self_rec_rawIn_normDist_T_11 ? 5'hB : _io_out_c_self_rec_rawIn_normDist_T_32; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_34 = _io_out_c_self_rec_rawIn_normDist_T_12 ? 5'hA : _io_out_c_self_rec_rawIn_normDist_T_33; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_35 = _io_out_c_self_rec_rawIn_normDist_T_13 ? 5'h9 : _io_out_c_self_rec_rawIn_normDist_T_34; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_36 = _io_out_c_self_rec_rawIn_normDist_T_14 ? 5'h8 : _io_out_c_self_rec_rawIn_normDist_T_35; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_37 = _io_out_c_self_rec_rawIn_normDist_T_15 ? 5'h7 : _io_out_c_self_rec_rawIn_normDist_T_36; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_38 = _io_out_c_self_rec_rawIn_normDist_T_16 ? 5'h6 : _io_out_c_self_rec_rawIn_normDist_T_37; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_39 = _io_out_c_self_rec_rawIn_normDist_T_17 ? 5'h5 : _io_out_c_self_rec_rawIn_normDist_T_38; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_40 = _io_out_c_self_rec_rawIn_normDist_T_18 ? 5'h4 : _io_out_c_self_rec_rawIn_normDist_T_39; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_41 = _io_out_c_self_rec_rawIn_normDist_T_19 ? 5'h3 : _io_out_c_self_rec_rawIn_normDist_T_40; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_42 = _io_out_c_self_rec_rawIn_normDist_T_20 ? 5'h2 : _io_out_c_self_rec_rawIn_normDist_T_41; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_43 = _io_out_c_self_rec_rawIn_normDist_T_21 ? 5'h1 : _io_out_c_self_rec_rawIn_normDist_T_42; // @[Mux.scala:50:70]
wire [4:0] io_out_c_self_rec_rawIn_normDist = _io_out_c_self_rec_rawIn_normDist_T_22 ? 5'h0 : _io_out_c_self_rec_rawIn_normDist_T_43; // @[Mux.scala:50:70]
wire [53:0] _io_out_c_self_rec_rawIn_subnormFract_T = {31'h0, io_out_c_self_rec_rawIn_fractIn} << io_out_c_self_rec_rawIn_normDist; // @[Mux.scala:50:70]
wire [21:0] _io_out_c_self_rec_rawIn_subnormFract_T_1 = _io_out_c_self_rec_rawIn_subnormFract_T[21:0]; // @[rawFloatFromFN.scala:52:{33,46}]
wire [22:0] io_out_c_self_rec_rawIn_subnormFract = {_io_out_c_self_rec_rawIn_subnormFract_T_1, 1'h0}; // @[rawFloatFromFN.scala:52:{46,64}]
wire [8:0] _io_out_c_self_rec_rawIn_adjustedExp_T = {4'hF, ~io_out_c_self_rec_rawIn_normDist}; // @[Mux.scala:50:70]
wire [8:0] _io_out_c_self_rec_rawIn_adjustedExp_T_1 = io_out_c_self_rec_rawIn_isZeroExpIn ? _io_out_c_self_rec_rawIn_adjustedExp_T : {1'h0, io_out_c_self_rec_rawIn_expIn}; // @[rawFloatFromFN.scala:45:19, :48:30, :54:10, :55:18]
wire [1:0] _io_out_c_self_rec_rawIn_adjustedExp_T_2 = io_out_c_self_rec_rawIn_isZeroExpIn ? 2'h2 : 2'h1; // @[rawFloatFromFN.scala:48:30, :58:14]
wire [7:0] _io_out_c_self_rec_rawIn_adjustedExp_T_3 = {6'h20, _io_out_c_self_rec_rawIn_adjustedExp_T_2}; // @[rawFloatFromFN.scala:58:{9,14}]
wire [9:0] _io_out_c_self_rec_rawIn_adjustedExp_T_4 = {1'h0, _io_out_c_self_rec_rawIn_adjustedExp_T_1} + {2'h0, _io_out_c_self_rec_rawIn_adjustedExp_T_3}; // @[rawFloatFromFN.scala:54:10, :57:9, :58:9]
wire [8:0] io_out_c_self_rec_rawIn_adjustedExp = _io_out_c_self_rec_rawIn_adjustedExp_T_4[8:0]; // @[rawFloatFromFN.scala:57:9]
wire [8:0] _io_out_c_self_rec_rawIn_out_sExp_T = io_out_c_self_rec_rawIn_adjustedExp; // @[rawFloatFromFN.scala:57:9, :68:28]
wire io_out_c_self_rec_rawIn_isZero = io_out_c_self_rec_rawIn_isZeroExpIn & io_out_c_self_rec_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:48:30, :49:34, :60:30]
wire io_out_c_self_rec_rawIn_isZero_0 = io_out_c_self_rec_rawIn_isZero; // @[rawFloatFromFN.scala:60:30, :63:19]
wire [1:0] _io_out_c_self_rec_rawIn_isSpecial_T = io_out_c_self_rec_rawIn_adjustedExp[8:7]; // @[rawFloatFromFN.scala:57:9, :61:32]
wire io_out_c_self_rec_rawIn_isSpecial = &_io_out_c_self_rec_rawIn_isSpecial_T; // @[rawFloatFromFN.scala:61:{32,57}]
wire _io_out_c_self_rec_rawIn_out_isNaN_T_1; // @[rawFloatFromFN.scala:64:28]
wire _io_out_c_self_rec_rawIn_out_isInf_T; // @[rawFloatFromFN.scala:65:28]
wire _io_out_c_self_rec_T_2 = io_out_c_self_rec_rawIn_isNaN; // @[recFNFromFN.scala:49:20]
wire [9:0] _io_out_c_self_rec_rawIn_out_sExp_T_1; // @[rawFloatFromFN.scala:68:42]
wire [24:0] _io_out_c_self_rec_rawIn_out_sig_T_3; // @[rawFloatFromFN.scala:70:27]
wire io_out_c_self_rec_rawIn_isInf; // @[rawFloatFromFN.scala:63:19]
wire [9:0] io_out_c_self_rec_rawIn_sExp; // @[rawFloatFromFN.scala:63:19]
wire [24:0] io_out_c_self_rec_rawIn_sig; // @[rawFloatFromFN.scala:63:19]
wire _io_out_c_self_rec_rawIn_out_isNaN_T = ~io_out_c_self_rec_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:49:34, :64:31]
assign _io_out_c_self_rec_rawIn_out_isNaN_T_1 = io_out_c_self_rec_rawIn_isSpecial & _io_out_c_self_rec_rawIn_out_isNaN_T; // @[rawFloatFromFN.scala:61:57, :64:{28,31}]
assign io_out_c_self_rec_rawIn_isNaN = _io_out_c_self_rec_rawIn_out_isNaN_T_1; // @[rawFloatFromFN.scala:63:19, :64:28]
assign _io_out_c_self_rec_rawIn_out_isInf_T = io_out_c_self_rec_rawIn_isSpecial & io_out_c_self_rec_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:49:34, :61:57, :65:28]
assign io_out_c_self_rec_rawIn_isInf = _io_out_c_self_rec_rawIn_out_isInf_T; // @[rawFloatFromFN.scala:63:19, :65:28]
assign _io_out_c_self_rec_rawIn_out_sExp_T_1 = {1'h0, _io_out_c_self_rec_rawIn_out_sExp_T}; // @[rawFloatFromFN.scala:68:{28,42}]
assign io_out_c_self_rec_rawIn_sExp = _io_out_c_self_rec_rawIn_out_sExp_T_1; // @[rawFloatFromFN.scala:63:19, :68:42]
wire _io_out_c_self_rec_rawIn_out_sig_T = ~io_out_c_self_rec_rawIn_isZero; // @[rawFloatFromFN.scala:60:30, :70:19]
wire [1:0] _io_out_c_self_rec_rawIn_out_sig_T_1 = {1'h0, _io_out_c_self_rec_rawIn_out_sig_T}; // @[rawFloatFromFN.scala:70:{16,19}]
wire [22:0] _io_out_c_self_rec_rawIn_out_sig_T_2 = io_out_c_self_rec_rawIn_isZeroExpIn ? io_out_c_self_rec_rawIn_subnormFract : io_out_c_self_rec_rawIn_fractIn; // @[rawFloatFromFN.scala:46:21, :48:30, :52:64, :70:33]
assign _io_out_c_self_rec_rawIn_out_sig_T_3 = {_io_out_c_self_rec_rawIn_out_sig_T_1, _io_out_c_self_rec_rawIn_out_sig_T_2}; // @[rawFloatFromFN.scala:70:{16,27,33}]
assign io_out_c_self_rec_rawIn_sig = _io_out_c_self_rec_rawIn_out_sig_T_3; // @[rawFloatFromFN.scala:63:19, :70:27]
wire [2:0] _io_out_c_self_rec_T = io_out_c_self_rec_rawIn_sExp[8:6]; // @[recFNFromFN.scala:48:50]
wire [2:0] _io_out_c_self_rec_T_1 = io_out_c_self_rec_rawIn_isZero_0 ? 3'h0 : _io_out_c_self_rec_T; // @[recFNFromFN.scala:48:{15,50}]
wire [2:0] _io_out_c_self_rec_T_3 = {_io_out_c_self_rec_T_1[2:1], _io_out_c_self_rec_T_1[0] | _io_out_c_self_rec_T_2}; // @[recFNFromFN.scala:48:{15,76}, :49:20]
wire [3:0] _io_out_c_self_rec_T_4 = {io_out_c_self_rec_rawIn_sign_0, _io_out_c_self_rec_T_3}; // @[recFNFromFN.scala:47:20, :48:76]
wire [5:0] _io_out_c_self_rec_T_5 = io_out_c_self_rec_rawIn_sExp[5:0]; // @[recFNFromFN.scala:50:23]
wire [9:0] _io_out_c_self_rec_T_6 = {_io_out_c_self_rec_T_4, _io_out_c_self_rec_T_5}; // @[recFNFromFN.scala:47:20, :49:45, :50:23]
wire [22:0] _io_out_c_self_rec_T_7 = io_out_c_self_rec_rawIn_sig[22:0]; // @[recFNFromFN.scala:51:22]
wire [32:0] io_out_c_self_rec = {_io_out_c_self_rec_T_6, _io_out_c_self_rec_T_7}; // @[recFNFromFN.scala:49:45, :50:41, :51:22]
wire [7:0] io_out_c_shift_exp; // @[Arithmetic.scala:442:29]
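// 8'h7F is the single-precision exponent bias (127); bias - shift_offset is the biased exponent of the 2^(-shift_offset) scale factor built for the floating-point right-shift (>>) from Arithmetic.scala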
wire [7:0] _GEN = 8'h7F - {3'h0, shift_offset}; // @[PE.scala:91:25]
wire [7:0] _io_out_c_shift_exp_T; // @[Arithmetic.scala:443:34]
assign _io_out_c_shift_exp_T = _GEN; // @[Arithmetic.scala:443:34]
wire [7:0] _io_out_c_shift_exp_T_2; // @[Arithmetic.scala:443:34]
assign _io_out_c_shift_exp_T_2 = _GEN; // @[Arithmetic.scala:443:34]
wire [6:0] _io_out_c_shift_exp_T_1 = _io_out_c_shift_exp_T[6:0]; // @[Arithmetic.scala:443:34]
assign io_out_c_shift_exp = {1'h0, _io_out_c_shift_exp_T_1}; // @[Arithmetic.scala:442:29, :443:{19,34}]
wire [8:0] io_out_c_shift_fn_hi = {1'h0, io_out_c_shift_exp}; // @[Arithmetic.scala:442:29, :444:27]
wire [31:0] io_out_c_shift_fn = {io_out_c_shift_fn_hi, 23'h0}; // @[Arithmetic.scala:444:27]
wire io_out_c_shift_rec_rawIn_sign = io_out_c_shift_fn[31]; // @[rawFloatFromFN.scala:44:18]
wire io_out_c_shift_rec_rawIn_sign_0 = io_out_c_shift_rec_rawIn_sign; // @[rawFloatFromFN.scala:44:18, :63:19]
wire [7:0] io_out_c_shift_rec_rawIn_expIn = io_out_c_shift_fn[30:23]; // @[rawFloatFromFN.scala:45:19]
wire [22:0] io_out_c_shift_rec_rawIn_fractIn = io_out_c_shift_fn[22:0]; // @[rawFloatFromFN.scala:46:21]
wire io_out_c_shift_rec_rawIn_isZeroExpIn = io_out_c_shift_rec_rawIn_expIn == 8'h0; // @[rawFloatFromFN.scala:45:19, :48:30]
wire io_out_c_shift_rec_rawIn_isZeroFractIn = io_out_c_shift_rec_rawIn_fractIn == 23'h0; // @[rawFloatFromFN.scala:46:21, :49:34]
wire _io_out_c_shift_rec_rawIn_normDist_T = io_out_c_shift_rec_rawIn_fractIn[0]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_1 = io_out_c_shift_rec_rawIn_fractIn[1]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_2 = io_out_c_shift_rec_rawIn_fractIn[2]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_3 = io_out_c_shift_rec_rawIn_fractIn[3]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_4 = io_out_c_shift_rec_rawIn_fractIn[4]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_5 = io_out_c_shift_rec_rawIn_fractIn[5]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_6 = io_out_c_shift_rec_rawIn_fractIn[6]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_7 = io_out_c_shift_rec_rawIn_fractIn[7]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_8 = io_out_c_shift_rec_rawIn_fractIn[8]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_9 = io_out_c_shift_rec_rawIn_fractIn[9]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_10 = io_out_c_shift_rec_rawIn_fractIn[10]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_11 = io_out_c_shift_rec_rawIn_fractIn[11]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_12 = io_out_c_shift_rec_rawIn_fractIn[12]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_13 = io_out_c_shift_rec_rawIn_fractIn[13]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_14 = io_out_c_shift_rec_rawIn_fractIn[14]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_15 = io_out_c_shift_rec_rawIn_fractIn[15]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_16 = io_out_c_shift_rec_rawIn_fractIn[16]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_17 = io_out_c_shift_rec_rawIn_fractIn[17]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_18 = io_out_c_shift_rec_rawIn_fractIn[18]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_19 = io_out_c_shift_rec_rawIn_fractIn[19]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_20 = io_out_c_shift_rec_rawIn_fractIn[20]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_21 = io_out_c_shift_rec_rawIn_fractIn[21]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_22 = io_out_c_shift_rec_rawIn_fractIn[22]; // @[rawFloatFromFN.scala:46:21]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_23 = _io_out_c_shift_rec_rawIn_normDist_T_1 ? 5'h15 : 5'h16; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_24 = _io_out_c_shift_rec_rawIn_normDist_T_2 ? 5'h14 : _io_out_c_shift_rec_rawIn_normDist_T_23; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_25 = _io_out_c_shift_rec_rawIn_normDist_T_3 ? 5'h13 : _io_out_c_shift_rec_rawIn_normDist_T_24; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_26 = _io_out_c_shift_rec_rawIn_normDist_T_4 ? 5'h12 : _io_out_c_shift_rec_rawIn_normDist_T_25; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_27 = _io_out_c_shift_rec_rawIn_normDist_T_5 ? 5'h11 : _io_out_c_shift_rec_rawIn_normDist_T_26; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_28 = _io_out_c_shift_rec_rawIn_normDist_T_6 ? 5'h10 : _io_out_c_shift_rec_rawIn_normDist_T_27; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_29 = _io_out_c_shift_rec_rawIn_normDist_T_7 ? 5'hF : _io_out_c_shift_rec_rawIn_normDist_T_28; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_30 = _io_out_c_shift_rec_rawIn_normDist_T_8 ? 5'hE : _io_out_c_shift_rec_rawIn_normDist_T_29; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_31 = _io_out_c_shift_rec_rawIn_normDist_T_9 ? 5'hD : _io_out_c_shift_rec_rawIn_normDist_T_30; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_32 = _io_out_c_shift_rec_rawIn_normDist_T_10 ? 5'hC : _io_out_c_shift_rec_rawIn_normDist_T_31; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_33 = _io_out_c_shift_rec_rawIn_normDist_T_11 ? 5'hB : _io_out_c_shift_rec_rawIn_normDist_T_32; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_34 = _io_out_c_shift_rec_rawIn_normDist_T_12 ? 5'hA : _io_out_c_shift_rec_rawIn_normDist_T_33; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_35 = _io_out_c_shift_rec_rawIn_normDist_T_13 ? 5'h9 : _io_out_c_shift_rec_rawIn_normDist_T_34; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_36 = _io_out_c_shift_rec_rawIn_normDist_T_14 ? 5'h8 : _io_out_c_shift_rec_rawIn_normDist_T_35; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_37 = _io_out_c_shift_rec_rawIn_normDist_T_15 ? 5'h7 : _io_out_c_shift_rec_rawIn_normDist_T_36; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_38 = _io_out_c_shift_rec_rawIn_normDist_T_16 ? 5'h6 : _io_out_c_shift_rec_rawIn_normDist_T_37; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_39 = _io_out_c_shift_rec_rawIn_normDist_T_17 ? 5'h5 : _io_out_c_shift_rec_rawIn_normDist_T_38; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_40 = _io_out_c_shift_rec_rawIn_normDist_T_18 ? 5'h4 : _io_out_c_shift_rec_rawIn_normDist_T_39; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_41 = _io_out_c_shift_rec_rawIn_normDist_T_19 ? 5'h3 : _io_out_c_shift_rec_rawIn_normDist_T_40; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_42 = _io_out_c_shift_rec_rawIn_normDist_T_20 ? 5'h2 : _io_out_c_shift_rec_rawIn_normDist_T_41; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_43 = _io_out_c_shift_rec_rawIn_normDist_T_21 ? 5'h1 : _io_out_c_shift_rec_rawIn_normDist_T_42; // @[Mux.scala:50:70]
wire [4:0] io_out_c_shift_rec_rawIn_normDist = _io_out_c_shift_rec_rawIn_normDist_T_22 ? 5'h0 : _io_out_c_shift_rec_rawIn_normDist_T_43; // @[Mux.scala:50:70]
wire [53:0] _io_out_c_shift_rec_rawIn_subnormFract_T = {31'h0, io_out_c_shift_rec_rawIn_fractIn} << io_out_c_shift_rec_rawIn_normDist; // @[Mux.scala:50:70]
wire [21:0] _io_out_c_shift_rec_rawIn_subnormFract_T_1 = _io_out_c_shift_rec_rawIn_subnormFract_T[21:0]; // @[rawFloatFromFN.scala:52:{33,46}]
wire [22:0] io_out_c_shift_rec_rawIn_subnormFract = {_io_out_c_shift_rec_rawIn_subnormFract_T_1, 1'h0}; // @[rawFloatFromFN.scala:52:{46,64}]
wire [8:0] _io_out_c_shift_rec_rawIn_adjustedExp_T = {4'hF, ~io_out_c_shift_rec_rawIn_normDist}; // @[Mux.scala:50:70]
wire [8:0] _io_out_c_shift_rec_rawIn_adjustedExp_T_1 = io_out_c_shift_rec_rawIn_isZeroExpIn ? _io_out_c_shift_rec_rawIn_adjustedExp_T : {1'h0, io_out_c_shift_rec_rawIn_expIn}; // @[rawFloatFromFN.scala:45:19, :48:30, :54:10, :55:18]
wire [1:0] _io_out_c_shift_rec_rawIn_adjustedExp_T_2 = io_out_c_shift_rec_rawIn_isZeroExpIn ? 2'h2 : 2'h1; // @[rawFloatFromFN.scala:48:30, :58:14]
wire [7:0] _io_out_c_shift_rec_rawIn_adjustedExp_T_3 = {6'h20, _io_out_c_shift_rec_rawIn_adjustedExp_T_2}; // @[rawFloatFromFN.scala:58:{9,14}]
wire [9:0] _io_out_c_shift_rec_rawIn_adjustedExp_T_4 = {1'h0, _io_out_c_shift_rec_rawIn_adjustedExp_T_1} + {2'h0, _io_out_c_shift_rec_rawIn_adjustedExp_T_3}; // @[rawFloatFromFN.scala:54:10, :57:9, :58:9]
wire [8:0] io_out_c_shift_rec_rawIn_adjustedExp = _io_out_c_shift_rec_rawIn_adjustedExp_T_4[8:0]; // @[rawFloatFromFN.scala:57:9]
wire [8:0] _io_out_c_shift_rec_rawIn_out_sExp_T = io_out_c_shift_rec_rawIn_adjustedExp; // @[rawFloatFromFN.scala:57:9, :68:28]
wire io_out_c_shift_rec_rawIn_isZero = io_out_c_shift_rec_rawIn_isZeroExpIn & io_out_c_shift_rec_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:48:30, :49:34, :60:30]
wire io_out_c_shift_rec_rawIn_isZero_0 = io_out_c_shift_rec_rawIn_isZero; // @[rawFloatFromFN.scala:60:30, :63:19]
wire [1:0] _io_out_c_shift_rec_rawIn_isSpecial_T = io_out_c_shift_rec_rawIn_adjustedExp[8:7]; // @[rawFloatFromFN.scala:57:9, :61:32]
wire io_out_c_shift_rec_rawIn_isSpecial = &_io_out_c_shift_rec_rawIn_isSpecial_T; // @[rawFloatFromFN.scala:61:{32,57}]
wire _io_out_c_shift_rec_rawIn_out_isNaN_T_1; // @[rawFloatFromFN.scala:64:28]
wire _io_out_c_shift_rec_rawIn_out_isInf_T; // @[rawFloatFromFN.scala:65:28]
wire _io_out_c_shift_rec_T_2 = io_out_c_shift_rec_rawIn_isNaN; // @[recFNFromFN.scala:49:20]
wire [9:0] _io_out_c_shift_rec_rawIn_out_sExp_T_1; // @[rawFloatFromFN.scala:68:42]
wire [24:0] _io_out_c_shift_rec_rawIn_out_sig_T_3; // @[rawFloatFromFN.scala:70:27]
wire io_out_c_shift_rec_rawIn_isInf; // @[rawFloatFromFN.scala:63:19]
wire [9:0] io_out_c_shift_rec_rawIn_sExp; // @[rawFloatFromFN.scala:63:19]
wire [24:0] io_out_c_shift_rec_rawIn_sig; // @[rawFloatFromFN.scala:63:19]
wire _io_out_c_shift_rec_rawIn_out_isNaN_T = ~io_out_c_shift_rec_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:49:34, :64:31]
assign _io_out_c_shift_rec_rawIn_out_isNaN_T_1 = io_out_c_shift_rec_rawIn_isSpecial & _io_out_c_shift_rec_rawIn_out_isNaN_T; // @[rawFloatFromFN.scala:61:57, :64:{28,31}]
assign io_out_c_shift_rec_rawIn_isNaN = _io_out_c_shift_rec_rawIn_out_isNaN_T_1; // @[rawFloatFromFN.scala:63:19, :64:28]
assign _io_out_c_shift_rec_rawIn_out_isInf_T = io_out_c_shift_rec_rawIn_isSpecial & io_out_c_shift_rec_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:49:34, :61:57, :65:28]
assign io_out_c_shift_rec_rawIn_isInf = _io_out_c_shift_rec_rawIn_out_isInf_T; // @[rawFloatFromFN.scala:63:19, :65:28]
assign _io_out_c_shift_rec_rawIn_out_sExp_T_1 = {1'h0, _io_out_c_shift_rec_rawIn_out_sExp_T}; // @[rawFloatFromFN.scala:68:{28,42}]
assign io_out_c_shift_rec_rawIn_sExp = _io_out_c_shift_rec_rawIn_out_sExp_T_1; // @[rawFloatFromFN.scala:63:19, :68:42]
wire _io_out_c_shift_rec_rawIn_out_sig_T = ~io_out_c_shift_rec_rawIn_isZero; // @[rawFloatFromFN.scala:60:30, :70:19]
wire [1:0] _io_out_c_shift_rec_rawIn_out_sig_T_1 = {1'h0, _io_out_c_shift_rec_rawIn_out_sig_T}; // @[rawFloatFromFN.scala:70:{16,19}]
wire [22:0] _io_out_c_shift_rec_rawIn_out_sig_T_2 = io_out_c_shift_rec_rawIn_isZeroExpIn ? io_out_c_shift_rec_rawIn_subnormFract : io_out_c_shift_rec_rawIn_fractIn; // @[rawFloatFromFN.scala:46:21, :48:30, :52:64, :70:33]
assign _io_out_c_shift_rec_rawIn_out_sig_T_3 = {_io_out_c_shift_rec_rawIn_out_sig_T_1, _io_out_c_shift_rec_rawIn_out_sig_T_2}; // @[rawFloatFromFN.scala:70:{16,27,33}]
assign io_out_c_shift_rec_rawIn_sig = _io_out_c_shift_rec_rawIn_out_sig_T_3; // @[rawFloatFromFN.scala:63:19, :70:27]
wire [2:0] _io_out_c_shift_rec_T = io_out_c_shift_rec_rawIn_sExp[8:6]; // @[recFNFromFN.scala:48:50]
wire [2:0] _io_out_c_shift_rec_T_1 = io_out_c_shift_rec_rawIn_isZero_0 ? 3'h0 : _io_out_c_shift_rec_T; // @[recFNFromFN.scala:48:{15,50}]
wire [2:0] _io_out_c_shift_rec_T_3 = {_io_out_c_shift_rec_T_1[2:1], _io_out_c_shift_rec_T_1[0] | _io_out_c_shift_rec_T_2}; // @[recFNFromFN.scala:48:{15,76}, :49:20]
wire [3:0] _io_out_c_shift_rec_T_4 = {io_out_c_shift_rec_rawIn_sign_0, _io_out_c_shift_rec_T_3}; // @[recFNFromFN.scala:47:20, :48:76]
wire [5:0] _io_out_c_shift_rec_T_5 = io_out_c_shift_rec_rawIn_sExp[5:0]; // @[recFNFromFN.scala:50:23]
wire [9:0] _io_out_c_shift_rec_T_6 = {_io_out_c_shift_rec_T_4, _io_out_c_shift_rec_T_5}; // @[recFNFromFN.scala:47:20, :49:45, :50:23]
wire [22:0] _io_out_c_shift_rec_T_7 = io_out_c_shift_rec_rawIn_sig[22:0]; // @[recFNFromFN.scala:51:22]
wire [32:0] io_out_c_shift_rec = {_io_out_c_shift_rec_T_6, _io_out_c_shift_rec_T_7}; // @[recFNFromFN.scala:49:45, :50:41, :51:22]
wire _io_out_c_T = |io_out_c_shift_exp; // @[Arithmetic.scala:442:29, :447:26]
wire _io_out_c_T_2 = ~_io_out_c_T_1; // @[Arithmetic.scala:447:15]
wire _io_out_c_T_3 = ~_io_out_c_T; // @[Arithmetic.scala:447:{15,26}]
wire [31:0] _io_out_c_result_bits_T; // @[fNFromRecFN.scala:66:12]
wire [31:0] io_out_c_result_bits; // @[Arithmetic.scala:458:26]
wire [8:0] io_out_c_result_bits_rawIn_exp = _io_out_c_muladder_io_out[31:23]; // @[rawFloatFromRecFN.scala:51:21]
wire [2:0] _io_out_c_result_bits_rawIn_isZero_T = io_out_c_result_bits_rawIn_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28]
wire io_out_c_result_bits_rawIn_isZero = _io_out_c_result_bits_rawIn_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}]
wire io_out_c_result_bits_rawIn_isZero_0 = io_out_c_result_bits_rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :55:23]
wire [1:0] _io_out_c_result_bits_rawIn_isSpecial_T = io_out_c_result_bits_rawIn_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28]
wire io_out_c_result_bits_rawIn_isSpecial = &_io_out_c_result_bits_rawIn_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}]
wire _io_out_c_result_bits_rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33]
wire _io_out_c_result_bits_rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33]
wire _io_out_c_result_bits_rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:59:25]
wire [9:0] _io_out_c_result_bits_rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27]
wire [24:0] _io_out_c_result_bits_rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44]
wire io_out_c_result_bits_rawIn_isNaN; // @[rawFloatFromRecFN.scala:55:23]
wire io_out_c_result_bits_rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire io_out_c_result_bits_rawIn_sign; // @[rawFloatFromRecFN.scala:55:23]
wire [9:0] io_out_c_result_bits_rawIn_sExp; // @[rawFloatFromRecFN.scala:55:23]
wire [24:0] io_out_c_result_bits_rawIn_sig; // @[rawFloatFromRecFN.scala:55:23]
wire _io_out_c_result_bits_rawIn_out_isNaN_T = io_out_c_result_bits_rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41]
wire _io_out_c_result_bits_rawIn_out_isInf_T = io_out_c_result_bits_rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41]
assign _io_out_c_result_bits_rawIn_out_isNaN_T_1 = io_out_c_result_bits_rawIn_isSpecial & _io_out_c_result_bits_rawIn_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}]
assign io_out_c_result_bits_rawIn_isNaN = _io_out_c_result_bits_rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33]
wire _io_out_c_result_bits_rawIn_out_isInf_T_1 = ~_io_out_c_result_bits_rawIn_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}]
assign _io_out_c_result_bits_rawIn_out_isInf_T_2 = io_out_c_result_bits_rawIn_isSpecial & _io_out_c_result_bits_rawIn_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}]
assign io_out_c_result_bits_rawIn_isInf = _io_out_c_result_bits_rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33]
assign _io_out_c_result_bits_rawIn_out_sign_T = _io_out_c_muladder_io_out[32]; // @[rawFloatFromRecFN.scala:59:25]
assign io_out_c_result_bits_rawIn_sign = _io_out_c_result_bits_rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25]
assign _io_out_c_result_bits_rawIn_out_sExp_T = {1'h0, io_out_c_result_bits_rawIn_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27]
assign io_out_c_result_bits_rawIn_sExp = _io_out_c_result_bits_rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire _io_out_c_result_bits_rawIn_out_sig_T = ~io_out_c_result_bits_rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :61:35]
wire [1:0] _io_out_c_result_bits_rawIn_out_sig_T_1 = {1'h0, _io_out_c_result_bits_rawIn_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}]
wire [22:0] _io_out_c_result_bits_rawIn_out_sig_T_2 = _io_out_c_muladder_io_out[22:0]; // @[rawFloatFromRecFN.scala:61:49]
assign _io_out_c_result_bits_rawIn_out_sig_T_3 = {_io_out_c_result_bits_rawIn_out_sig_T_1, _io_out_c_result_bits_rawIn_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}]
assign io_out_c_result_bits_rawIn_sig = _io_out_c_result_bits_rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44]
wire io_out_c_result_bits_isSubnormal = $signed(io_out_c_result_bits_rawIn_sExp) < 10'sh82; // @[rawFloatFromRecFN.scala:55:23]
wire [4:0] _io_out_c_result_bits_denormShiftDist_T = io_out_c_result_bits_rawIn_sExp[4:0]; // @[rawFloatFromRecFN.scala:55:23]
wire [5:0] _io_out_c_result_bits_denormShiftDist_T_1 = 6'h1 - {1'h0, _io_out_c_result_bits_denormShiftDist_T}; // @[fNFromRecFN.scala:52:{35,47}]
wire [4:0] io_out_c_result_bits_denormShiftDist = _io_out_c_result_bits_denormShiftDist_T_1[4:0]; // @[fNFromRecFN.scala:52:35]
wire [23:0] _io_out_c_result_bits_denormFract_T = io_out_c_result_bits_rawIn_sig[24:1]; // @[rawFloatFromRecFN.scala:55:23]
wire [23:0] _io_out_c_result_bits_denormFract_T_1 = _io_out_c_result_bits_denormFract_T >> io_out_c_result_bits_denormShiftDist; // @[fNFromRecFN.scala:52:35, :53:{38,42}]
wire [22:0] io_out_c_result_bits_denormFract = _io_out_c_result_bits_denormFract_T_1[22:0]; // @[fNFromRecFN.scala:53:{42,60}]
wire [7:0] _io_out_c_result_bits_expOut_T = io_out_c_result_bits_rawIn_sExp[7:0]; // @[rawFloatFromRecFN.scala:55:23]
wire [8:0] _io_out_c_result_bits_expOut_T_1 = {1'h0, _io_out_c_result_bits_expOut_T} - 9'h81; // @[fNFromRecFN.scala:58:{27,45}]
wire [7:0] _io_out_c_result_bits_expOut_T_2 = _io_out_c_result_bits_expOut_T_1[7:0]; // @[fNFromRecFN.scala:58:45]
wire [7:0] _io_out_c_result_bits_expOut_T_3 = io_out_c_result_bits_isSubnormal ? 8'h0 : _io_out_c_result_bits_expOut_T_2; // @[fNFromRecFN.scala:51:38, :56:16, :58:45]
wire _io_out_c_result_bits_expOut_T_4 = io_out_c_result_bits_rawIn_isNaN | io_out_c_result_bits_rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire [7:0] _io_out_c_result_bits_expOut_T_5 = {8{_io_out_c_result_bits_expOut_T_4}}; // @[fNFromRecFN.scala:60:{21,44}]
wire [7:0] io_out_c_result_bits_expOut = _io_out_c_result_bits_expOut_T_3 | _io_out_c_result_bits_expOut_T_5; // @[fNFromRecFN.scala:56:16, :60:{15,21}]
wire [22:0] _io_out_c_result_bits_fractOut_T = io_out_c_result_bits_rawIn_sig[22:0]; // @[rawFloatFromRecFN.scala:55:23]
wire [22:0] _io_out_c_result_bits_fractOut_T_1 = io_out_c_result_bits_rawIn_isInf ? 23'h0 : _io_out_c_result_bits_fractOut_T; // @[rawFloatFromRecFN.scala:55:23]
wire [22:0] io_out_c_result_bits_fractOut = io_out_c_result_bits_isSubnormal ? io_out_c_result_bits_denormFract : _io_out_c_result_bits_fractOut_T_1; // @[fNFromRecFN.scala:51:38, :53:60, :62:16, :64:20]
wire [8:0] io_out_c_result_bits_hi = {io_out_c_result_bits_rawIn_sign, io_out_c_result_bits_expOut}; // @[rawFloatFromRecFN.scala:55:23]
assign _io_out_c_result_bits_T = {io_out_c_result_bits_hi, io_out_c_result_bits_fractOut}; // @[fNFromRecFN.scala:62:16, :66:12]
assign io_out_c_result_bits = _io_out_c_result_bits_T; // @[fNFromRecFN.scala:66:12]
wire io_out_c_self_rec_rawIn_sign_1 = io_out_c_result_bits[31]; // @[rawFloatFromFN.scala:44:18]
wire io_out_c_self_rec_rawIn_1_sign = io_out_c_self_rec_rawIn_sign_1; // @[rawFloatFromFN.scala:44:18, :63:19]
wire [7:0] io_out_c_self_rec_rawIn_expIn_1 = io_out_c_result_bits[30:23]; // @[rawFloatFromFN.scala:45:19]
wire [22:0] io_out_c_self_rec_rawIn_fractIn_1 = io_out_c_result_bits[22:0]; // @[rawFloatFromFN.scala:46:21]
wire io_out_c_self_rec_rawIn_isZeroExpIn_1 = io_out_c_self_rec_rawIn_expIn_1 == 8'h0; // @[rawFloatFromFN.scala:45:19, :48:30]
wire io_out_c_self_rec_rawIn_isZeroFractIn_1 = io_out_c_self_rec_rawIn_fractIn_1 == 23'h0; // @[rawFloatFromFN.scala:46:21, :49:34]
wire _io_out_c_self_rec_rawIn_normDist_T_44 = io_out_c_self_rec_rawIn_fractIn_1[0]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_45 = io_out_c_self_rec_rawIn_fractIn_1[1]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_46 = io_out_c_self_rec_rawIn_fractIn_1[2]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_47 = io_out_c_self_rec_rawIn_fractIn_1[3]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_48 = io_out_c_self_rec_rawIn_fractIn_1[4]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_49 = io_out_c_self_rec_rawIn_fractIn_1[5]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_50 = io_out_c_self_rec_rawIn_fractIn_1[6]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_51 = io_out_c_self_rec_rawIn_fractIn_1[7]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_52 = io_out_c_self_rec_rawIn_fractIn_1[8]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_53 = io_out_c_self_rec_rawIn_fractIn_1[9]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_54 = io_out_c_self_rec_rawIn_fractIn_1[10]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_55 = io_out_c_self_rec_rawIn_fractIn_1[11]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_56 = io_out_c_self_rec_rawIn_fractIn_1[12]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_57 = io_out_c_self_rec_rawIn_fractIn_1[13]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_58 = io_out_c_self_rec_rawIn_fractIn_1[14]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_59 = io_out_c_self_rec_rawIn_fractIn_1[15]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_60 = io_out_c_self_rec_rawIn_fractIn_1[16]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_61 = io_out_c_self_rec_rawIn_fractIn_1[17]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_62 = io_out_c_self_rec_rawIn_fractIn_1[18]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_63 = io_out_c_self_rec_rawIn_fractIn_1[19]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_64 = io_out_c_self_rec_rawIn_fractIn_1[20]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_65 = io_out_c_self_rec_rawIn_fractIn_1[21]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_66 = io_out_c_self_rec_rawIn_fractIn_1[22]; // @[rawFloatFromFN.scala:46:21]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_67 = _io_out_c_self_rec_rawIn_normDist_T_45 ? 5'h15 : 5'h16; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_68 = _io_out_c_self_rec_rawIn_normDist_T_46 ? 5'h14 : _io_out_c_self_rec_rawIn_normDist_T_67; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_69 = _io_out_c_self_rec_rawIn_normDist_T_47 ? 5'h13 : _io_out_c_self_rec_rawIn_normDist_T_68; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_70 = _io_out_c_self_rec_rawIn_normDist_T_48 ? 5'h12 : _io_out_c_self_rec_rawIn_normDist_T_69; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_71 = _io_out_c_self_rec_rawIn_normDist_T_49 ? 5'h11 : _io_out_c_self_rec_rawIn_normDist_T_70; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_72 = _io_out_c_self_rec_rawIn_normDist_T_50 ? 5'h10 : _io_out_c_self_rec_rawIn_normDist_T_71; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_73 = _io_out_c_self_rec_rawIn_normDist_T_51 ? 5'hF : _io_out_c_self_rec_rawIn_normDist_T_72; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_74 = _io_out_c_self_rec_rawIn_normDist_T_52 ? 5'hE : _io_out_c_self_rec_rawIn_normDist_T_73; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_75 = _io_out_c_self_rec_rawIn_normDist_T_53 ? 5'hD : _io_out_c_self_rec_rawIn_normDist_T_74; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_76 = _io_out_c_self_rec_rawIn_normDist_T_54 ? 5'hC : _io_out_c_self_rec_rawIn_normDist_T_75; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_77 = _io_out_c_self_rec_rawIn_normDist_T_55 ? 5'hB : _io_out_c_self_rec_rawIn_normDist_T_76; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_78 = _io_out_c_self_rec_rawIn_normDist_T_56 ? 5'hA : _io_out_c_self_rec_rawIn_normDist_T_77; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_79 = _io_out_c_self_rec_rawIn_normDist_T_57 ? 5'h9 : _io_out_c_self_rec_rawIn_normDist_T_78; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_80 = _io_out_c_self_rec_rawIn_normDist_T_58 ? 5'h8 : _io_out_c_self_rec_rawIn_normDist_T_79; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_81 = _io_out_c_self_rec_rawIn_normDist_T_59 ? 5'h7 : _io_out_c_self_rec_rawIn_normDist_T_80; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_82 = _io_out_c_self_rec_rawIn_normDist_T_60 ? 5'h6 : _io_out_c_self_rec_rawIn_normDist_T_81; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_83 = _io_out_c_self_rec_rawIn_normDist_T_61 ? 5'h5 : _io_out_c_self_rec_rawIn_normDist_T_82; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_84 = _io_out_c_self_rec_rawIn_normDist_T_62 ? 5'h4 : _io_out_c_self_rec_rawIn_normDist_T_83; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_85 = _io_out_c_self_rec_rawIn_normDist_T_63 ? 5'h3 : _io_out_c_self_rec_rawIn_normDist_T_84; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_86 = _io_out_c_self_rec_rawIn_normDist_T_64 ? 5'h2 : _io_out_c_self_rec_rawIn_normDist_T_85; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_87 = _io_out_c_self_rec_rawIn_normDist_T_65 ? 5'h1 : _io_out_c_self_rec_rawIn_normDist_T_86; // @[Mux.scala:50:70]
wire [4:0] io_out_c_self_rec_rawIn_normDist_1 = _io_out_c_self_rec_rawIn_normDist_T_66 ? 5'h0 : _io_out_c_self_rec_rawIn_normDist_T_87; // @[Mux.scala:50:70]
wire [53:0] _io_out_c_self_rec_rawIn_subnormFract_T_2 = {31'h0, io_out_c_self_rec_rawIn_fractIn_1} << io_out_c_self_rec_rawIn_normDist_1; // @[Mux.scala:50:70]
wire [21:0] _io_out_c_self_rec_rawIn_subnormFract_T_3 = _io_out_c_self_rec_rawIn_subnormFract_T_2[21:0]; // @[rawFloatFromFN.scala:52:{33,46}]
wire [22:0] io_out_c_self_rec_rawIn_subnormFract_1 = {_io_out_c_self_rec_rawIn_subnormFract_T_3, 1'h0}; // @[rawFloatFromFN.scala:52:{46,64}]
wire [8:0] _io_out_c_self_rec_rawIn_adjustedExp_T_5 = {4'hF, ~io_out_c_self_rec_rawIn_normDist_1}; // @[Mux.scala:50:70]
wire [8:0] _io_out_c_self_rec_rawIn_adjustedExp_T_6 = io_out_c_self_rec_rawIn_isZeroExpIn_1 ? _io_out_c_self_rec_rawIn_adjustedExp_T_5 : {1'h0, io_out_c_self_rec_rawIn_expIn_1}; // @[rawFloatFromFN.scala:45:19, :48:30, :54:10, :55:18]
wire [1:0] _io_out_c_self_rec_rawIn_adjustedExp_T_7 = io_out_c_self_rec_rawIn_isZeroExpIn_1 ? 2'h2 : 2'h1; // @[rawFloatFromFN.scala:48:30, :58:14]
wire [7:0] _io_out_c_self_rec_rawIn_adjustedExp_T_8 = {6'h20, _io_out_c_self_rec_rawIn_adjustedExp_T_7}; // @[rawFloatFromFN.scala:58:{9,14}]
wire [9:0] _io_out_c_self_rec_rawIn_adjustedExp_T_9 = {1'h0, _io_out_c_self_rec_rawIn_adjustedExp_T_6} + {2'h0, _io_out_c_self_rec_rawIn_adjustedExp_T_8}; // @[rawFloatFromFN.scala:54:10, :57:9, :58:9]
wire [8:0] io_out_c_self_rec_rawIn_adjustedExp_1 = _io_out_c_self_rec_rawIn_adjustedExp_T_9[8:0]; // @[rawFloatFromFN.scala:57:9]
wire [8:0] _io_out_c_self_rec_rawIn_out_sExp_T_2 = io_out_c_self_rec_rawIn_adjustedExp_1; // @[rawFloatFromFN.scala:57:9, :68:28]
wire io_out_c_self_rec_rawIn_isZero_1 = io_out_c_self_rec_rawIn_isZeroExpIn_1 & io_out_c_self_rec_rawIn_isZeroFractIn_1; // @[rawFloatFromFN.scala:48:30, :49:34, :60:30]
wire io_out_c_self_rec_rawIn_1_isZero = io_out_c_self_rec_rawIn_isZero_1; // @[rawFloatFromFN.scala:60:30, :63:19]
wire [1:0] _io_out_c_self_rec_rawIn_isSpecial_T_1 = io_out_c_self_rec_rawIn_adjustedExp_1[8:7]; // @[rawFloatFromFN.scala:57:9, :61:32]
wire io_out_c_self_rec_rawIn_isSpecial_1 = &_io_out_c_self_rec_rawIn_isSpecial_T_1; // @[rawFloatFromFN.scala:61:{32,57}]
wire _io_out_c_self_rec_rawIn_out_isNaN_T_3; // @[rawFloatFromFN.scala:64:28]
wire _io_out_c_self_rec_rawIn_out_isInf_T_1; // @[rawFloatFromFN.scala:65:28]
wire _io_out_c_self_rec_T_10 = io_out_c_self_rec_rawIn_1_isNaN; // @[recFNFromFN.scala:49:20]
wire [9:0] _io_out_c_self_rec_rawIn_out_sExp_T_3; // @[rawFloatFromFN.scala:68:42]
wire [24:0] _io_out_c_self_rec_rawIn_out_sig_T_7; // @[rawFloatFromFN.scala:70:27]
wire io_out_c_self_rec_rawIn_1_isInf; // @[rawFloatFromFN.scala:63:19]
wire [9:0] io_out_c_self_rec_rawIn_1_sExp; // @[rawFloatFromFN.scala:63:19]
wire [24:0] io_out_c_self_rec_rawIn_1_sig; // @[rawFloatFromFN.scala:63:19]
wire _io_out_c_self_rec_rawIn_out_isNaN_T_2 = ~io_out_c_self_rec_rawIn_isZeroFractIn_1; // @[rawFloatFromFN.scala:49:34, :64:31]
assign _io_out_c_self_rec_rawIn_out_isNaN_T_3 = io_out_c_self_rec_rawIn_isSpecial_1 & _io_out_c_self_rec_rawIn_out_isNaN_T_2; // @[rawFloatFromFN.scala:61:57, :64:{28,31}]
assign io_out_c_self_rec_rawIn_1_isNaN = _io_out_c_self_rec_rawIn_out_isNaN_T_3; // @[rawFloatFromFN.scala:63:19, :64:28]
assign _io_out_c_self_rec_rawIn_out_isInf_T_1 = io_out_c_self_rec_rawIn_isSpecial_1 & io_out_c_self_rec_rawIn_isZeroFractIn_1; // @[rawFloatFromFN.scala:49:34, :61:57, :65:28]
assign io_out_c_self_rec_rawIn_1_isInf = _io_out_c_self_rec_rawIn_out_isInf_T_1; // @[rawFloatFromFN.scala:63:19, :65:28]
assign _io_out_c_self_rec_rawIn_out_sExp_T_3 = {1'h0, _io_out_c_self_rec_rawIn_out_sExp_T_2}; // @[rawFloatFromFN.scala:68:{28,42}]
assign io_out_c_self_rec_rawIn_1_sExp = _io_out_c_self_rec_rawIn_out_sExp_T_3; // @[rawFloatFromFN.scala:63:19, :68:42]
wire _io_out_c_self_rec_rawIn_out_sig_T_4 = ~io_out_c_self_rec_rawIn_isZero_1; // @[rawFloatFromFN.scala:60:30, :70:19]
wire [1:0] _io_out_c_self_rec_rawIn_out_sig_T_5 = {1'h0, _io_out_c_self_rec_rawIn_out_sig_T_4}; // @[rawFloatFromFN.scala:70:{16,19}]
wire [22:0] _io_out_c_self_rec_rawIn_out_sig_T_6 = io_out_c_self_rec_rawIn_isZeroExpIn_1 ? io_out_c_self_rec_rawIn_subnormFract_1 : io_out_c_self_rec_rawIn_fractIn_1; // @[rawFloatFromFN.scala:46:21, :48:30, :52:64, :70:33]
assign _io_out_c_self_rec_rawIn_out_sig_T_7 = {_io_out_c_self_rec_rawIn_out_sig_T_5, _io_out_c_self_rec_rawIn_out_sig_T_6}; // @[rawFloatFromFN.scala:70:{16,27,33}]
assign io_out_c_self_rec_rawIn_1_sig = _io_out_c_self_rec_rawIn_out_sig_T_7; // @[rawFloatFromFN.scala:63:19, :70:27]
wire [2:0] _io_out_c_self_rec_T_8 = io_out_c_self_rec_rawIn_1_sExp[8:6]; // @[recFNFromFN.scala:48:50]
wire [2:0] _io_out_c_self_rec_T_9 = io_out_c_self_rec_rawIn_1_isZero ? 3'h0 : _io_out_c_self_rec_T_8; // @[recFNFromFN.scala:48:{15,50}]
wire [2:0] _io_out_c_self_rec_T_11 = {_io_out_c_self_rec_T_9[2:1], _io_out_c_self_rec_T_9[0] | _io_out_c_self_rec_T_10}; // @[recFNFromFN.scala:48:{15,76}, :49:20]
wire [3:0] _io_out_c_self_rec_T_12 = {io_out_c_self_rec_rawIn_1_sign, _io_out_c_self_rec_T_11}; // @[recFNFromFN.scala:47:20, :48:76]
wire [5:0] _io_out_c_self_rec_T_13 = io_out_c_self_rec_rawIn_1_sExp[5:0]; // @[recFNFromFN.scala:50:23]
wire [9:0] _io_out_c_self_rec_T_14 = {_io_out_c_self_rec_T_12, _io_out_c_self_rec_T_13}; // @[recFNFromFN.scala:47:20, :49:45, :50:23]
wire [22:0] _io_out_c_self_rec_T_15 = io_out_c_self_rec_rawIn_1_sig[22:0]; // @[recFNFromFN.scala:51:22]
wire [32:0] io_out_c_self_rec_1 = {_io_out_c_self_rec_T_14, _io_out_c_self_rec_T_15}; // @[recFNFromFN.scala:49:45, :50:41, :51:22]
wire [31:0] _io_out_c_result_bits_T_1; // @[fNFromRecFN.scala:66:12]
wire [31:0] io_out_c_result_1_bits; // @[Arithmetic.scala:505:26]
wire [8:0] io_out_c_result_bits_rawIn_exp_1 = _io_out_c_resizer_io_out[31:23]; // @[rawFloatFromRecFN.scala:51:21]
wire [2:0] _io_out_c_result_bits_rawIn_isZero_T_1 = io_out_c_result_bits_rawIn_exp_1[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28]
wire io_out_c_result_bits_rawIn_isZero_1 = _io_out_c_result_bits_rawIn_isZero_T_1 == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}]
wire io_out_c_result_bits_rawIn_1_isZero = io_out_c_result_bits_rawIn_isZero_1; // @[rawFloatFromRecFN.scala:52:53, :55:23]
wire [1:0] _io_out_c_result_bits_rawIn_isSpecial_T_1 = io_out_c_result_bits_rawIn_exp_1[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28]
wire io_out_c_result_bits_rawIn_isSpecial_1 = &_io_out_c_result_bits_rawIn_isSpecial_T_1; // @[rawFloatFromRecFN.scala:53:{28,53}]
wire _io_out_c_result_bits_rawIn_out_isNaN_T_3; // @[rawFloatFromRecFN.scala:56:33]
wire _io_out_c_result_bits_rawIn_out_isInf_T_5; // @[rawFloatFromRecFN.scala:57:33]
wire _io_out_c_result_bits_rawIn_out_sign_T_1; // @[rawFloatFromRecFN.scala:59:25]
wire [9:0] _io_out_c_result_bits_rawIn_out_sExp_T_1; // @[rawFloatFromRecFN.scala:60:27]
wire [24:0] _io_out_c_result_bits_rawIn_out_sig_T_7; // @[rawFloatFromRecFN.scala:61:44]
wire io_out_c_result_bits_rawIn_1_isNaN; // @[rawFloatFromRecFN.scala:55:23]
wire io_out_c_result_bits_rawIn_1_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire io_out_c_result_bits_rawIn_1_sign; // @[rawFloatFromRecFN.scala:55:23]
wire [9:0] io_out_c_result_bits_rawIn_1_sExp; // @[rawFloatFromRecFN.scala:55:23]
wire [24:0] io_out_c_result_bits_rawIn_1_sig; // @[rawFloatFromRecFN.scala:55:23]
wire _io_out_c_result_bits_rawIn_out_isNaN_T_2 = io_out_c_result_bits_rawIn_exp_1[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41]
wire _io_out_c_result_bits_rawIn_out_isInf_T_3 = io_out_c_result_bits_rawIn_exp_1[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41]
assign _io_out_c_result_bits_rawIn_out_isNaN_T_3 = io_out_c_result_bits_rawIn_isSpecial_1 & _io_out_c_result_bits_rawIn_out_isNaN_T_2; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}]
assign io_out_c_result_bits_rawIn_1_isNaN = _io_out_c_result_bits_rawIn_out_isNaN_T_3; // @[rawFloatFromRecFN.scala:55:23, :56:33]
wire _io_out_c_result_bits_rawIn_out_isInf_T_4 = ~_io_out_c_result_bits_rawIn_out_isInf_T_3; // @[rawFloatFromRecFN.scala:57:{36,41}]
assign _io_out_c_result_bits_rawIn_out_isInf_T_5 = io_out_c_result_bits_rawIn_isSpecial_1 & _io_out_c_result_bits_rawIn_out_isInf_T_4; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}]
assign io_out_c_result_bits_rawIn_1_isInf = _io_out_c_result_bits_rawIn_out_isInf_T_5; // @[rawFloatFromRecFN.scala:55:23, :57:33]
assign _io_out_c_result_bits_rawIn_out_sign_T_1 = _io_out_c_resizer_io_out[32]; // @[rawFloatFromRecFN.scala:59:25]
assign io_out_c_result_bits_rawIn_1_sign = _io_out_c_result_bits_rawIn_out_sign_T_1; // @[rawFloatFromRecFN.scala:55:23, :59:25]
assign _io_out_c_result_bits_rawIn_out_sExp_T_1 = {1'h0, io_out_c_result_bits_rawIn_exp_1}; // @[rawFloatFromRecFN.scala:51:21, :60:27]
assign io_out_c_result_bits_rawIn_1_sExp = _io_out_c_result_bits_rawIn_out_sExp_T_1; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire _io_out_c_result_bits_rawIn_out_sig_T_4 = ~io_out_c_result_bits_rawIn_isZero_1; // @[rawFloatFromRecFN.scala:52:53, :61:35]
wire [1:0] _io_out_c_result_bits_rawIn_out_sig_T_5 = {1'h0, _io_out_c_result_bits_rawIn_out_sig_T_4}; // @[rawFloatFromRecFN.scala:61:{32,35}]
wire [22:0] _io_out_c_result_bits_rawIn_out_sig_T_6 = _io_out_c_resizer_io_out[22:0]; // @[rawFloatFromRecFN.scala:61:49]
assign _io_out_c_result_bits_rawIn_out_sig_T_7 = {_io_out_c_result_bits_rawIn_out_sig_T_5, _io_out_c_result_bits_rawIn_out_sig_T_6}; // @[rawFloatFromRecFN.scala:61:{32,44,49}]
assign io_out_c_result_bits_rawIn_1_sig = _io_out_c_result_bits_rawIn_out_sig_T_7; // @[rawFloatFromRecFN.scala:55:23, :61:44]
wire io_out_c_result_bits_isSubnormal_1 = $signed(io_out_c_result_bits_rawIn_1_sExp) < 10'sh82; // @[rawFloatFromRecFN.scala:55:23]
wire [4:0] _io_out_c_result_bits_denormShiftDist_T_2 = io_out_c_result_bits_rawIn_1_sExp[4:0]; // @[rawFloatFromRecFN.scala:55:23]
wire [5:0] _io_out_c_result_bits_denormShiftDist_T_3 = 6'h1 - {1'h0, _io_out_c_result_bits_denormShiftDist_T_2}; // @[fNFromRecFN.scala:52:{35,47}]
wire [4:0] io_out_c_result_bits_denormShiftDist_1 = _io_out_c_result_bits_denormShiftDist_T_3[4:0]; // @[fNFromRecFN.scala:52:35]
wire [23:0] _io_out_c_result_bits_denormFract_T_2 = io_out_c_result_bits_rawIn_1_sig[24:1]; // @[rawFloatFromRecFN.scala:55:23]
wire [23:0] _io_out_c_result_bits_denormFract_T_3 = _io_out_c_result_bits_denormFract_T_2 >> io_out_c_result_bits_denormShiftDist_1; // @[fNFromRecFN.scala:52:35, :53:{38,42}]
wire [22:0] io_out_c_result_bits_denormFract_1 = _io_out_c_result_bits_denormFract_T_3[22:0]; // @[fNFromRecFN.scala:53:{42,60}]
wire [7:0] _io_out_c_result_bits_expOut_T_6 = io_out_c_result_bits_rawIn_1_sExp[7:0]; // @[rawFloatFromRecFN.scala:55:23]
wire [8:0] _io_out_c_result_bits_expOut_T_7 = {1'h0, _io_out_c_result_bits_expOut_T_6} - 9'h81; // @[fNFromRecFN.scala:58:{27,45}]
wire [7:0] _io_out_c_result_bits_expOut_T_8 = _io_out_c_result_bits_expOut_T_7[7:0]; // @[fNFromRecFN.scala:58:45]
wire [7:0] _io_out_c_result_bits_expOut_T_9 = io_out_c_result_bits_isSubnormal_1 ? 8'h0 : _io_out_c_result_bits_expOut_T_8; // @[fNFromRecFN.scala:51:38, :56:16, :58:45]
wire _io_out_c_result_bits_expOut_T_10 = io_out_c_result_bits_rawIn_1_isNaN | io_out_c_result_bits_rawIn_1_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire [7:0] _io_out_c_result_bits_expOut_T_11 = {8{_io_out_c_result_bits_expOut_T_10}}; // @[fNFromRecFN.scala:60:{21,44}]
wire [7:0] io_out_c_result_bits_expOut_1 = _io_out_c_result_bits_expOut_T_9 | _io_out_c_result_bits_expOut_T_11; // @[fNFromRecFN.scala:56:16, :60:{15,21}]
wire [22:0] _io_out_c_result_bits_fractOut_T_2 = io_out_c_result_bits_rawIn_1_sig[22:0]; // @[rawFloatFromRecFN.scala:55:23]
wire [22:0] _io_out_c_result_bits_fractOut_T_3 = io_out_c_result_bits_rawIn_1_isInf ? 23'h0 : _io_out_c_result_bits_fractOut_T_2; // @[rawFloatFromRecFN.scala:55:23]
wire [22:0] io_out_c_result_bits_fractOut_1 = io_out_c_result_bits_isSubnormal_1 ? io_out_c_result_bits_denormFract_1 : _io_out_c_result_bits_fractOut_T_3; // @[fNFromRecFN.scala:51:38, :53:60, :62:16, :64:20]
wire [8:0] io_out_c_result_bits_hi_1 = {io_out_c_result_bits_rawIn_1_sign, io_out_c_result_bits_expOut_1}; // @[rawFloatFromRecFN.scala:55:23]
assign _io_out_c_result_bits_T_1 = {io_out_c_result_bits_hi_1, io_out_c_result_bits_fractOut_1}; // @[fNFromRecFN.scala:62:16, :66:12]
assign io_out_c_result_1_bits = _io_out_c_result_bits_T_1; // @[fNFromRecFN.scala:66:12]
wire [31:0] _mac_unit_io_in_b_T; // @[PE.scala:106:37]
assign _mac_unit_io_in_b_T = _mac_unit_io_in_b_WIRE_1; // @[PE.scala:106:37]
wire [31:0] _mac_unit_io_in_b_WIRE_bits = _mac_unit_io_in_b_T; // @[PE.scala:106:37]
wire c1_self_rec_rawIn_sign = io_in_d_bits_0[31]; // @[rawFloatFromFN.scala:44:18]
wire c2_self_rec_rawIn_sign = io_in_d_bits_0[31]; // @[rawFloatFromFN.scala:44:18]
wire c1_self_rec_rawIn_sign_0 = c1_self_rec_rawIn_sign; // @[rawFloatFromFN.scala:44:18, :63:19]
wire [7:0] c1_self_rec_rawIn_expIn = io_in_d_bits_0[30:23]; // @[rawFloatFromFN.scala:45:19]
wire [7:0] c2_self_rec_rawIn_expIn = io_in_d_bits_0[30:23]; // @[rawFloatFromFN.scala:45:19]
wire [22:0] c1_self_rec_rawIn_fractIn = io_in_d_bits_0[22:0]; // @[rawFloatFromFN.scala:46:21]
wire [22:0] c2_self_rec_rawIn_fractIn = io_in_d_bits_0[22:0]; // @[rawFloatFromFN.scala:46:21]
wire c1_self_rec_rawIn_isZeroExpIn = c1_self_rec_rawIn_expIn == 8'h0; // @[rawFloatFromFN.scala:45:19, :48:30]
wire c1_self_rec_rawIn_isZeroFractIn = c1_self_rec_rawIn_fractIn == 23'h0; // @[rawFloatFromFN.scala:46:21, :49:34]
wire _c1_self_rec_rawIn_normDist_T = c1_self_rec_rawIn_fractIn[0]; // @[rawFloatFromFN.scala:46:21]
wire _c1_self_rec_rawIn_normDist_T_1 = c1_self_rec_rawIn_fractIn[1]; // @[rawFloatFromFN.scala:46:21]
wire _c1_self_rec_rawIn_normDist_T_2 = c1_self_rec_rawIn_fractIn[2]; // @[rawFloatFromFN.scala:46:21]
wire _c1_self_rec_rawIn_normDist_T_3 = c1_self_rec_rawIn_fractIn[3]; // @[rawFloatFromFN.scala:46:21]
wire _c1_self_rec_rawIn_normDist_T_4 = c1_self_rec_rawIn_fractIn[4]; // @[rawFloatFromFN.scala:46:21]
wire _c1_self_rec_rawIn_normDist_T_5 = c1_self_rec_rawIn_fractIn[5]; // @[rawFloatFromFN.scala:46:21]
wire _c1_self_rec_rawIn_normDist_T_6 = c1_self_rec_rawIn_fractIn[6]; // @[rawFloatFromFN.scala:46:21]
wire _c1_self_rec_rawIn_normDist_T_7 = c1_self_rec_rawIn_fractIn[7]; // @[rawFloatFromFN.scala:46:21]
wire _c1_self_rec_rawIn_normDist_T_8 = c1_self_rec_rawIn_fractIn[8]; // @[rawFloatFromFN.scala:46:21]
wire _c1_self_rec_rawIn_normDist_T_9 = c1_self_rec_rawIn_fractIn[9]; // @[rawFloatFromFN.scala:46:21]
wire _c1_self_rec_rawIn_normDist_T_10 = c1_self_rec_rawIn_fractIn[10]; // @[rawFloatFromFN.scala:46:21]
wire _c1_self_rec_rawIn_normDist_T_11 = c1_self_rec_rawIn_fractIn[11]; // @[rawFloatFromFN.scala:46:21]
wire _c1_self_rec_rawIn_normDist_T_12 = c1_self_rec_rawIn_fractIn[12]; // @[rawFloatFromFN.scala:46:21]
wire _c1_self_rec_rawIn_normDist_T_13 = c1_self_rec_rawIn_fractIn[13]; // @[rawFloatFromFN.scala:46:21]
wire _c1_self_rec_rawIn_normDist_T_14 = c1_self_rec_rawIn_fractIn[14]; // @[rawFloatFromFN.scala:46:21]
wire _c1_self_rec_rawIn_normDist_T_15 = c1_self_rec_rawIn_fractIn[15]; // @[rawFloatFromFN.scala:46:21]
wire _c1_self_rec_rawIn_normDist_T_16 = c1_self_rec_rawIn_fractIn[16]; // @[rawFloatFromFN.scala:46:21]
wire _c1_self_rec_rawIn_normDist_T_17 = c1_self_rec_rawIn_fractIn[17]; // @[rawFloatFromFN.scala:46:21]
wire _c1_self_rec_rawIn_normDist_T_18 = c1_self_rec_rawIn_fractIn[18]; // @[rawFloatFromFN.scala:46:21]
wire _c1_self_rec_rawIn_normDist_T_19 = c1_self_rec_rawIn_fractIn[19]; // @[rawFloatFromFN.scala:46:21]
wire _c1_self_rec_rawIn_normDist_T_20 = c1_self_rec_rawIn_fractIn[20]; // @[rawFloatFromFN.scala:46:21]
wire _c1_self_rec_rawIn_normDist_T_21 = c1_self_rec_rawIn_fractIn[21]; // @[rawFloatFromFN.scala:46:21]
wire _c1_self_rec_rawIn_normDist_T_22 = c1_self_rec_rawIn_fractIn[22]; // @[rawFloatFromFN.scala:46:21]
wire [4:0] _c1_self_rec_rawIn_normDist_T_23 = _c1_self_rec_rawIn_normDist_T_1 ? 5'h15 : 5'h16; // @[Mux.scala:50:70]
wire [4:0] _c1_self_rec_rawIn_normDist_T_24 = _c1_self_rec_rawIn_normDist_T_2 ? 5'h14 : _c1_self_rec_rawIn_normDist_T_23; // @[Mux.scala:50:70]
wire [4:0] _c1_self_rec_rawIn_normDist_T_25 = _c1_self_rec_rawIn_normDist_T_3 ? 5'h13 : _c1_self_rec_rawIn_normDist_T_24; // @[Mux.scala:50:70]
wire [4:0] _c1_self_rec_rawIn_normDist_T_26 = _c1_self_rec_rawIn_normDist_T_4 ? 5'h12 : _c1_self_rec_rawIn_normDist_T_25; // @[Mux.scala:50:70]
wire [4:0] _c1_self_rec_rawIn_normDist_T_27 = _c1_self_rec_rawIn_normDist_T_5 ? 5'h11 : _c1_self_rec_rawIn_normDist_T_26; // @[Mux.scala:50:70]
wire [4:0] _c1_self_rec_rawIn_normDist_T_28 = _c1_self_rec_rawIn_normDist_T_6 ? 5'h10 : _c1_self_rec_rawIn_normDist_T_27; // @[Mux.scala:50:70]
wire [4:0] _c1_self_rec_rawIn_normDist_T_29 = _c1_self_rec_rawIn_normDist_T_7 ? 5'hF : _c1_self_rec_rawIn_normDist_T_28; // @[Mux.scala:50:70]
wire [4:0] _c1_self_rec_rawIn_normDist_T_30 = _c1_self_rec_rawIn_normDist_T_8 ? 5'hE : _c1_self_rec_rawIn_normDist_T_29; // @[Mux.scala:50:70]
wire [4:0] _c1_self_rec_rawIn_normDist_T_31 = _c1_self_rec_rawIn_normDist_T_9 ? 5'hD : _c1_self_rec_rawIn_normDist_T_30; // @[Mux.scala:50:70]
wire [4:0] _c1_self_rec_rawIn_normDist_T_32 = _c1_self_rec_rawIn_normDist_T_10 ? 5'hC : _c1_self_rec_rawIn_normDist_T_31; // @[Mux.scala:50:70]
wire [4:0] _c1_self_rec_rawIn_normDist_T_33 = _c1_self_rec_rawIn_normDist_T_11 ? 5'hB : _c1_self_rec_rawIn_normDist_T_32; // @[Mux.scala:50:70]
wire [4:0] _c1_self_rec_rawIn_normDist_T_34 = _c1_self_rec_rawIn_normDist_T_12 ? 5'hA : _c1_self_rec_rawIn_normDist_T_33; // @[Mux.scala:50:70]
wire [4:0] _c1_self_rec_rawIn_normDist_T_35 = _c1_self_rec_rawIn_normDist_T_13 ? 5'h9 : _c1_self_rec_rawIn_normDist_T_34; // @[Mux.scala:50:70]
wire [4:0] _c1_self_rec_rawIn_normDist_T_36 = _c1_self_rec_rawIn_normDist_T_14 ? 5'h8 : _c1_self_rec_rawIn_normDist_T_35; // @[Mux.scala:50:70]
wire [4:0] _c1_self_rec_rawIn_normDist_T_37 = _c1_self_rec_rawIn_normDist_T_15 ? 5'h7 : _c1_self_rec_rawIn_normDist_T_36; // @[Mux.scala:50:70]
wire [4:0] _c1_self_rec_rawIn_normDist_T_38 = _c1_self_rec_rawIn_normDist_T_16 ? 5'h6 : _c1_self_rec_rawIn_normDist_T_37; // @[Mux.scala:50:70]
wire [4:0] _c1_self_rec_rawIn_normDist_T_39 = _c1_self_rec_rawIn_normDist_T_17 ? 5'h5 : _c1_self_rec_rawIn_normDist_T_38; // @[Mux.scala:50:70]
wire [4:0] _c1_self_rec_rawIn_normDist_T_40 = _c1_self_rec_rawIn_normDist_T_18 ? 5'h4 : _c1_self_rec_rawIn_normDist_T_39; // @[Mux.scala:50:70]
wire [4:0] _c1_self_rec_rawIn_normDist_T_41 = _c1_self_rec_rawIn_normDist_T_19 ? 5'h3 : _c1_self_rec_rawIn_normDist_T_40; // @[Mux.scala:50:70]
wire [4:0] _c1_self_rec_rawIn_normDist_T_42 = _c1_self_rec_rawIn_normDist_T_20 ? 5'h2 : _c1_self_rec_rawIn_normDist_T_41; // @[Mux.scala:50:70]
wire [4:0] _c1_self_rec_rawIn_normDist_T_43 = _c1_self_rec_rawIn_normDist_T_21 ? 5'h1 : _c1_self_rec_rawIn_normDist_T_42; // @[Mux.scala:50:70]
wire [4:0] c1_self_rec_rawIn_normDist = _c1_self_rec_rawIn_normDist_T_22 ? 5'h0 : _c1_self_rec_rawIn_normDist_T_43; // @[Mux.scala:50:70]
wire [53:0] _c1_self_rec_rawIn_subnormFract_T = {31'h0, c1_self_rec_rawIn_fractIn} << c1_self_rec_rawIn_normDist; // @[Mux.scala:50:70]
wire [21:0] _c1_self_rec_rawIn_subnormFract_T_1 = _c1_self_rec_rawIn_subnormFract_T[21:0]; // @[rawFloatFromFN.scala:52:{33,46}]
wire [22:0] c1_self_rec_rawIn_subnormFract = {_c1_self_rec_rawIn_subnormFract_T_1, 1'h0}; // @[rawFloatFromFN.scala:52:{46,64}]
wire [8:0] _c1_self_rec_rawIn_adjustedExp_T = {4'hF, ~c1_self_rec_rawIn_normDist}; // @[Mux.scala:50:70]
wire [8:0] _c1_self_rec_rawIn_adjustedExp_T_1 = c1_self_rec_rawIn_isZeroExpIn ? _c1_self_rec_rawIn_adjustedExp_T : {1'h0, c1_self_rec_rawIn_expIn}; // @[rawFloatFromFN.scala:45:19, :48:30, :54:10, :55:18]
wire [1:0] _c1_self_rec_rawIn_adjustedExp_T_2 = c1_self_rec_rawIn_isZeroExpIn ? 2'h2 : 2'h1; // @[rawFloatFromFN.scala:48:30, :58:14]
wire [7:0] _c1_self_rec_rawIn_adjustedExp_T_3 = {6'h20, _c1_self_rec_rawIn_adjustedExp_T_2}; // @[rawFloatFromFN.scala:58:{9,14}]
wire [9:0] _c1_self_rec_rawIn_adjustedExp_T_4 = {1'h0, _c1_self_rec_rawIn_adjustedExp_T_1} + {2'h0, _c1_self_rec_rawIn_adjustedExp_T_3}; // @[rawFloatFromFN.scala:54:10, :57:9, :58:9]
wire [8:0] c1_self_rec_rawIn_adjustedExp = _c1_self_rec_rawIn_adjustedExp_T_4[8:0]; // @[rawFloatFromFN.scala:57:9]
wire [8:0] _c1_self_rec_rawIn_out_sExp_T = c1_self_rec_rawIn_adjustedExp; // @[rawFloatFromFN.scala:57:9, :68:28]
wire c1_self_rec_rawIn_isZero = c1_self_rec_rawIn_isZeroExpIn & c1_self_rec_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:48:30, :49:34, :60:30]
wire c1_self_rec_rawIn_isZero_0 = c1_self_rec_rawIn_isZero; // @[rawFloatFromFN.scala:60:30, :63:19]
wire [1:0] _c1_self_rec_rawIn_isSpecial_T = c1_self_rec_rawIn_adjustedExp[8:7]; // @[rawFloatFromFN.scala:57:9, :61:32]
wire c1_self_rec_rawIn_isSpecial = &_c1_self_rec_rawIn_isSpecial_T; // @[rawFloatFromFN.scala:61:{32,57}]
wire _c1_self_rec_rawIn_out_isNaN_T_1; // @[rawFloatFromFN.scala:64:28]
wire _c1_self_rec_rawIn_out_isInf_T; // @[rawFloatFromFN.scala:65:28]
wire _c1_self_rec_T_2 = c1_self_rec_rawIn_isNaN; // @[recFNFromFN.scala:49:20]
wire [9:0] _c1_self_rec_rawIn_out_sExp_T_1; // @[rawFloatFromFN.scala:68:42]
wire [24:0] _c1_self_rec_rawIn_out_sig_T_3; // @[rawFloatFromFN.scala:70:27]
wire c1_self_rec_rawIn_isInf; // @[rawFloatFromFN.scala:63:19]
wire [9:0] c1_self_rec_rawIn_sExp; // @[rawFloatFromFN.scala:63:19]
wire [24:0] c1_self_rec_rawIn_sig; // @[rawFloatFromFN.scala:63:19]
wire _c1_self_rec_rawIn_out_isNaN_T = ~c1_self_rec_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:49:34, :64:31]
assign _c1_self_rec_rawIn_out_isNaN_T_1 = c1_self_rec_rawIn_isSpecial & _c1_self_rec_rawIn_out_isNaN_T; // @[rawFloatFromFN.scala:61:57, :64:{28,31}]
assign c1_self_rec_rawIn_isNaN = _c1_self_rec_rawIn_out_isNaN_T_1; // @[rawFloatFromFN.scala:63:19, :64:28]
assign _c1_self_rec_rawIn_out_isInf_T = c1_self_rec_rawIn_isSpecial & c1_self_rec_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:49:34, :61:57, :65:28]
assign c1_self_rec_rawIn_isInf = _c1_self_rec_rawIn_out_isInf_T; // @[rawFloatFromFN.scala:63:19, :65:28]
assign _c1_self_rec_rawIn_out_sExp_T_1 = {1'h0, _c1_self_rec_rawIn_out_sExp_T}; // @[rawFloatFromFN.scala:68:{28,42}]
assign c1_self_rec_rawIn_sExp = _c1_self_rec_rawIn_out_sExp_T_1; // @[rawFloatFromFN.scala:63:19, :68:42]
wire _c1_self_rec_rawIn_out_sig_T = ~c1_self_rec_rawIn_isZero; // @[rawFloatFromFN.scala:60:30, :70:19]
wire [1:0] _c1_self_rec_rawIn_out_sig_T_1 = {1'h0, _c1_self_rec_rawIn_out_sig_T}; // @[rawFloatFromFN.scala:70:{16,19}]
wire [22:0] _c1_self_rec_rawIn_out_sig_T_2 = c1_self_rec_rawIn_isZeroExpIn ? c1_self_rec_rawIn_subnormFract : c1_self_rec_rawIn_fractIn; // @[rawFloatFromFN.scala:46:21, :48:30, :52:64, :70:33]
assign _c1_self_rec_rawIn_out_sig_T_3 = {_c1_self_rec_rawIn_out_sig_T_1, _c1_self_rec_rawIn_out_sig_T_2}; // @[rawFloatFromFN.scala:70:{16,27,33}]
assign c1_self_rec_rawIn_sig = _c1_self_rec_rawIn_out_sig_T_3; // @[rawFloatFromFN.scala:63:19, :70:27]
wire [2:0] _c1_self_rec_T = c1_self_rec_rawIn_sExp[8:6]; // @[recFNFromFN.scala:48:50]
wire [2:0] _c1_self_rec_T_1 = c1_self_rec_rawIn_isZero_0 ? 3'h0 : _c1_self_rec_T; // @[recFNFromFN.scala:48:{15,50}]
wire [2:0] _c1_self_rec_T_3 = {_c1_self_rec_T_1[2:1], _c1_self_rec_T_1[0] | _c1_self_rec_T_2}; // @[recFNFromFN.scala:48:{15,76}, :49:20]
wire [3:0] _c1_self_rec_T_4 = {c1_self_rec_rawIn_sign_0, _c1_self_rec_T_3}; // @[recFNFromFN.scala:47:20, :48:76]
wire [5:0] _c1_self_rec_T_5 = c1_self_rec_rawIn_sExp[5:0]; // @[recFNFromFN.scala:50:23]
wire [9:0] _c1_self_rec_T_6 = {_c1_self_rec_T_4, _c1_self_rec_T_5}; // @[recFNFromFN.scala:47:20, :49:45, :50:23]
wire [22:0] _c1_self_rec_T_7 = c1_self_rec_rawIn_sig[22:0]; // @[recFNFromFN.scala:51:22]
wire [32:0] c1_self_rec = {_c1_self_rec_T_6, _c1_self_rec_T_7}; // @[recFNFromFN.scala:49:45, :50:41, :51:22]
wire [31:0] _c1_result_bits_T; // @[fNFromRecFN.scala:66:12]
wire [31:0] c1_result_bits; // @[Arithmetic.scala:491:26]
wire [8:0] c1_result_bits_rawIn_exp = _c1_resizer_io_out[31:23]; // @[rawFloatFromRecFN.scala:51:21]
wire [2:0] _c1_result_bits_rawIn_isZero_T = c1_result_bits_rawIn_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28]
wire c1_result_bits_rawIn_isZero = _c1_result_bits_rawIn_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}]
wire c1_result_bits_rawIn_isZero_0 = c1_result_bits_rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :55:23]
wire [1:0] _c1_result_bits_rawIn_isSpecial_T = c1_result_bits_rawIn_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28]
wire c1_result_bits_rawIn_isSpecial = &_c1_result_bits_rawIn_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}]
wire _c1_result_bits_rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33]
wire _c1_result_bits_rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33]
wire _c1_result_bits_rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:59:25]
wire [9:0] _c1_result_bits_rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27]
wire [24:0] _c1_result_bits_rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44]
wire c1_result_bits_rawIn_isNaN; // @[rawFloatFromRecFN.scala:55:23]
wire c1_result_bits_rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire c1_result_bits_rawIn_sign; // @[rawFloatFromRecFN.scala:55:23]
wire [9:0] c1_result_bits_rawIn_sExp; // @[rawFloatFromRecFN.scala:55:23]
wire [24:0] c1_result_bits_rawIn_sig; // @[rawFloatFromRecFN.scala:55:23]
wire _c1_result_bits_rawIn_out_isNaN_T = c1_result_bits_rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41]
wire _c1_result_bits_rawIn_out_isInf_T = c1_result_bits_rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41]
assign _c1_result_bits_rawIn_out_isNaN_T_1 = c1_result_bits_rawIn_isSpecial & _c1_result_bits_rawIn_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}]
assign c1_result_bits_rawIn_isNaN = _c1_result_bits_rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33]
wire _c1_result_bits_rawIn_out_isInf_T_1 = ~_c1_result_bits_rawIn_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}]
assign _c1_result_bits_rawIn_out_isInf_T_2 = c1_result_bits_rawIn_isSpecial & _c1_result_bits_rawIn_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}]
assign c1_result_bits_rawIn_isInf = _c1_result_bits_rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33]
assign _c1_result_bits_rawIn_out_sign_T = _c1_resizer_io_out[32]; // @[rawFloatFromRecFN.scala:59:25]
assign c1_result_bits_rawIn_sign = _c1_result_bits_rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25]
assign _c1_result_bits_rawIn_out_sExp_T = {1'h0, c1_result_bits_rawIn_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27]
assign c1_result_bits_rawIn_sExp = _c1_result_bits_rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire _c1_result_bits_rawIn_out_sig_T = ~c1_result_bits_rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :61:35]
wire [1:0] _c1_result_bits_rawIn_out_sig_T_1 = {1'h0, _c1_result_bits_rawIn_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}]
wire [22:0] _c1_result_bits_rawIn_out_sig_T_2 = _c1_resizer_io_out[22:0]; // @[rawFloatFromRecFN.scala:61:49]
assign _c1_result_bits_rawIn_out_sig_T_3 = {_c1_result_bits_rawIn_out_sig_T_1, _c1_result_bits_rawIn_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}]
assign c1_result_bits_rawIn_sig = _c1_result_bits_rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44]
wire c1_result_bits_isSubnormal = $signed(c1_result_bits_rawIn_sExp) < 10'sh82; // @[rawFloatFromRecFN.scala:55:23]
wire [4:0] _c1_result_bits_denormShiftDist_T = c1_result_bits_rawIn_sExp[4:0]; // @[rawFloatFromRecFN.scala:55:23]
wire [5:0] _c1_result_bits_denormShiftDist_T_1 = 6'h1 - {1'h0, _c1_result_bits_denormShiftDist_T}; // @[fNFromRecFN.scala:52:{35,47}]
wire [4:0] c1_result_bits_denormShiftDist = _c1_result_bits_denormShiftDist_T_1[4:0]; // @[fNFromRecFN.scala:52:35]
wire [23:0] _c1_result_bits_denormFract_T = c1_result_bits_rawIn_sig[24:1]; // @[rawFloatFromRecFN.scala:55:23]
wire [23:0] _c1_result_bits_denormFract_T_1 = _c1_result_bits_denormFract_T >> c1_result_bits_denormShiftDist; // @[fNFromRecFN.scala:52:35, :53:{38,42}]
wire [22:0] c1_result_bits_denormFract = _c1_result_bits_denormFract_T_1[22:0]; // @[fNFromRecFN.scala:53:{42,60}]
wire [7:0] _c1_result_bits_expOut_T = c1_result_bits_rawIn_sExp[7:0]; // @[rawFloatFromRecFN.scala:55:23]
wire [8:0] _c1_result_bits_expOut_T_1 = {1'h0, _c1_result_bits_expOut_T} - 9'h81; // @[fNFromRecFN.scala:58:{27,45}]
wire [7:0] _c1_result_bits_expOut_T_2 = _c1_result_bits_expOut_T_1[7:0]; // @[fNFromRecFN.scala:58:45]
wire [7:0] _c1_result_bits_expOut_T_3 = c1_result_bits_isSubnormal ? 8'h0 : _c1_result_bits_expOut_T_2; // @[fNFromRecFN.scala:51:38, :56:16, :58:45]
wire _c1_result_bits_expOut_T_4 = c1_result_bits_rawIn_isNaN | c1_result_bits_rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire [7:0] _c1_result_bits_expOut_T_5 = {8{_c1_result_bits_expOut_T_4}}; // @[fNFromRecFN.scala:60:{21,44}]
wire [7:0] c1_result_bits_expOut = _c1_result_bits_expOut_T_3 | _c1_result_bits_expOut_T_5; // @[fNFromRecFN.scala:56:16, :60:{15,21}]
wire [22:0] _c1_result_bits_fractOut_T = c1_result_bits_rawIn_sig[22:0]; // @[rawFloatFromRecFN.scala:55:23]
wire [22:0] _c1_result_bits_fractOut_T_1 = c1_result_bits_rawIn_isInf ? 23'h0 : _c1_result_bits_fractOut_T; // @[rawFloatFromRecFN.scala:55:23]
wire [22:0] c1_result_bits_fractOut = c1_result_bits_isSubnormal ? c1_result_bits_denormFract : _c1_result_bits_fractOut_T_1; // @[fNFromRecFN.scala:51:38, :53:60, :62:16, :64:20]
wire [8:0] c1_result_bits_hi = {c1_result_bits_rawIn_sign, c1_result_bits_expOut}; // @[rawFloatFromRecFN.scala:55:23]
assign _c1_result_bits_T = {c1_result_bits_hi, c1_result_bits_fractOut}; // @[fNFromRecFN.scala:62:16, :66:12]
assign c1_result_bits = _c1_result_bits_T; // @[fNFromRecFN.scala:66:12]
wire io_out_c_self_rec_rawIn_sign_2 = c2_bits[31]; // @[rawFloatFromFN.scala:44:18]
wire io_out_c_self_rec_rawIn_2_sign = io_out_c_self_rec_rawIn_sign_2; // @[rawFloatFromFN.scala:44:18, :63:19]
wire [7:0] io_out_c_self_rec_rawIn_expIn_2 = c2_bits[30:23]; // @[rawFloatFromFN.scala:45:19]
wire [22:0] io_out_c_self_rec_rawIn_fractIn_2 = c2_bits[22:0]; // @[rawFloatFromFN.scala:46:21]
wire io_out_c_self_rec_rawIn_isZeroExpIn_2 = io_out_c_self_rec_rawIn_expIn_2 == 8'h0; // @[rawFloatFromFN.scala:45:19, :48:30]
wire io_out_c_self_rec_rawIn_isZeroFractIn_2 = io_out_c_self_rec_rawIn_fractIn_2 == 23'h0; // @[rawFloatFromFN.scala:46:21, :49:34]
wire _io_out_c_self_rec_rawIn_normDist_T_88 = io_out_c_self_rec_rawIn_fractIn_2[0]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_89 = io_out_c_self_rec_rawIn_fractIn_2[1]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_90 = io_out_c_self_rec_rawIn_fractIn_2[2]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_91 = io_out_c_self_rec_rawIn_fractIn_2[3]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_92 = io_out_c_self_rec_rawIn_fractIn_2[4]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_93 = io_out_c_self_rec_rawIn_fractIn_2[5]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_94 = io_out_c_self_rec_rawIn_fractIn_2[6]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_95 = io_out_c_self_rec_rawIn_fractIn_2[7]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_96 = io_out_c_self_rec_rawIn_fractIn_2[8]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_97 = io_out_c_self_rec_rawIn_fractIn_2[9]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_98 = io_out_c_self_rec_rawIn_fractIn_2[10]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_99 = io_out_c_self_rec_rawIn_fractIn_2[11]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_100 = io_out_c_self_rec_rawIn_fractIn_2[12]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_101 = io_out_c_self_rec_rawIn_fractIn_2[13]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_102 = io_out_c_self_rec_rawIn_fractIn_2[14]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_103 = io_out_c_self_rec_rawIn_fractIn_2[15]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_104 = io_out_c_self_rec_rawIn_fractIn_2[16]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_105 = io_out_c_self_rec_rawIn_fractIn_2[17]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_106 = io_out_c_self_rec_rawIn_fractIn_2[18]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_107 = io_out_c_self_rec_rawIn_fractIn_2[19]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_108 = io_out_c_self_rec_rawIn_fractIn_2[20]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_109 = io_out_c_self_rec_rawIn_fractIn_2[21]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_self_rec_rawIn_normDist_T_110 = io_out_c_self_rec_rawIn_fractIn_2[22]; // @[rawFloatFromFN.scala:46:21]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_111 = _io_out_c_self_rec_rawIn_normDist_T_89 ? 5'h15 : 5'h16; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_112 = _io_out_c_self_rec_rawIn_normDist_T_90 ? 5'h14 : _io_out_c_self_rec_rawIn_normDist_T_111; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_113 = _io_out_c_self_rec_rawIn_normDist_T_91 ? 5'h13 : _io_out_c_self_rec_rawIn_normDist_T_112; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_114 = _io_out_c_self_rec_rawIn_normDist_T_92 ? 5'h12 : _io_out_c_self_rec_rawIn_normDist_T_113; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_115 = _io_out_c_self_rec_rawIn_normDist_T_93 ? 5'h11 : _io_out_c_self_rec_rawIn_normDist_T_114; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_116 = _io_out_c_self_rec_rawIn_normDist_T_94 ? 5'h10 : _io_out_c_self_rec_rawIn_normDist_T_115; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_117 = _io_out_c_self_rec_rawIn_normDist_T_95 ? 5'hF : _io_out_c_self_rec_rawIn_normDist_T_116; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_118 = _io_out_c_self_rec_rawIn_normDist_T_96 ? 5'hE : _io_out_c_self_rec_rawIn_normDist_T_117; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_119 = _io_out_c_self_rec_rawIn_normDist_T_97 ? 5'hD : _io_out_c_self_rec_rawIn_normDist_T_118; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_120 = _io_out_c_self_rec_rawIn_normDist_T_98 ? 5'hC : _io_out_c_self_rec_rawIn_normDist_T_119; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_121 = _io_out_c_self_rec_rawIn_normDist_T_99 ? 5'hB : _io_out_c_self_rec_rawIn_normDist_T_120; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_122 = _io_out_c_self_rec_rawIn_normDist_T_100 ? 5'hA : _io_out_c_self_rec_rawIn_normDist_T_121; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_123 = _io_out_c_self_rec_rawIn_normDist_T_101 ? 5'h9 : _io_out_c_self_rec_rawIn_normDist_T_122; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_124 = _io_out_c_self_rec_rawIn_normDist_T_102 ? 5'h8 : _io_out_c_self_rec_rawIn_normDist_T_123; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_125 = _io_out_c_self_rec_rawIn_normDist_T_103 ? 5'h7 : _io_out_c_self_rec_rawIn_normDist_T_124; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_126 = _io_out_c_self_rec_rawIn_normDist_T_104 ? 5'h6 : _io_out_c_self_rec_rawIn_normDist_T_125; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_127 = _io_out_c_self_rec_rawIn_normDist_T_105 ? 5'h5 : _io_out_c_self_rec_rawIn_normDist_T_126; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_128 = _io_out_c_self_rec_rawIn_normDist_T_106 ? 5'h4 : _io_out_c_self_rec_rawIn_normDist_T_127; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_129 = _io_out_c_self_rec_rawIn_normDist_T_107 ? 5'h3 : _io_out_c_self_rec_rawIn_normDist_T_128; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_130 = _io_out_c_self_rec_rawIn_normDist_T_108 ? 5'h2 : _io_out_c_self_rec_rawIn_normDist_T_129; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_self_rec_rawIn_normDist_T_131 = _io_out_c_self_rec_rawIn_normDist_T_109 ? 5'h1 : _io_out_c_self_rec_rawIn_normDist_T_130; // @[Mux.scala:50:70]
wire [4:0] io_out_c_self_rec_rawIn_normDist_2 = _io_out_c_self_rec_rawIn_normDist_T_110 ? 5'h0 : _io_out_c_self_rec_rawIn_normDist_T_131; // @[Mux.scala:50:70]
wire [53:0] _io_out_c_self_rec_rawIn_subnormFract_T_4 = {31'h0, io_out_c_self_rec_rawIn_fractIn_2} << io_out_c_self_rec_rawIn_normDist_2; // @[Mux.scala:50:70]
wire [21:0] _io_out_c_self_rec_rawIn_subnormFract_T_5 = _io_out_c_self_rec_rawIn_subnormFract_T_4[21:0]; // @[rawFloatFromFN.scala:52:{33,46}]
wire [22:0] io_out_c_self_rec_rawIn_subnormFract_2 = {_io_out_c_self_rec_rawIn_subnormFract_T_5, 1'h0}; // @[rawFloatFromFN.scala:52:{46,64}]
wire [8:0] _io_out_c_self_rec_rawIn_adjustedExp_T_10 = {4'hF, ~io_out_c_self_rec_rawIn_normDist_2}; // @[Mux.scala:50:70]
wire [8:0] _io_out_c_self_rec_rawIn_adjustedExp_T_11 = io_out_c_self_rec_rawIn_isZeroExpIn_2 ? _io_out_c_self_rec_rawIn_adjustedExp_T_10 : {1'h0, io_out_c_self_rec_rawIn_expIn_2}; // @[rawFloatFromFN.scala:45:19, :48:30, :54:10, :55:18]
wire [1:0] _io_out_c_self_rec_rawIn_adjustedExp_T_12 = io_out_c_self_rec_rawIn_isZeroExpIn_2 ? 2'h2 : 2'h1; // @[rawFloatFromFN.scala:48:30, :58:14]
wire [7:0] _io_out_c_self_rec_rawIn_adjustedExp_T_13 = {6'h20, _io_out_c_self_rec_rawIn_adjustedExp_T_12}; // @[rawFloatFromFN.scala:58:{9,14}]
wire [9:0] _io_out_c_self_rec_rawIn_adjustedExp_T_14 = {1'h0, _io_out_c_self_rec_rawIn_adjustedExp_T_11} + {2'h0, _io_out_c_self_rec_rawIn_adjustedExp_T_13}; // @[rawFloatFromFN.scala:54:10, :57:9, :58:9]
wire [8:0] io_out_c_self_rec_rawIn_adjustedExp_2 = _io_out_c_self_rec_rawIn_adjustedExp_T_14[8:0]; // @[rawFloatFromFN.scala:57:9]
wire [8:0] _io_out_c_self_rec_rawIn_out_sExp_T_4 = io_out_c_self_rec_rawIn_adjustedExp_2; // @[rawFloatFromFN.scala:57:9, :68:28]
wire io_out_c_self_rec_rawIn_isZero_2 = io_out_c_self_rec_rawIn_isZeroExpIn_2 & io_out_c_self_rec_rawIn_isZeroFractIn_2; // @[rawFloatFromFN.scala:48:30, :49:34, :60:30]
wire io_out_c_self_rec_rawIn_2_isZero = io_out_c_self_rec_rawIn_isZero_2; // @[rawFloatFromFN.scala:60:30, :63:19]
wire [1:0] _io_out_c_self_rec_rawIn_isSpecial_T_2 = io_out_c_self_rec_rawIn_adjustedExp_2[8:7]; // @[rawFloatFromFN.scala:57:9, :61:32]
wire io_out_c_self_rec_rawIn_isSpecial_2 = &_io_out_c_self_rec_rawIn_isSpecial_T_2; // @[rawFloatFromFN.scala:61:{32,57}]
wire _io_out_c_self_rec_rawIn_out_isNaN_T_5; // @[rawFloatFromFN.scala:64:28]
wire _io_out_c_self_rec_rawIn_out_isInf_T_2; // @[rawFloatFromFN.scala:65:28]
wire _io_out_c_self_rec_T_18 = io_out_c_self_rec_rawIn_2_isNaN; // @[recFNFromFN.scala:49:20]
wire [9:0] _io_out_c_self_rec_rawIn_out_sExp_T_5; // @[rawFloatFromFN.scala:68:42]
wire [24:0] _io_out_c_self_rec_rawIn_out_sig_T_11; // @[rawFloatFromFN.scala:70:27]
wire io_out_c_self_rec_rawIn_2_isInf; // @[rawFloatFromFN.scala:63:19]
wire [9:0] io_out_c_self_rec_rawIn_2_sExp; // @[rawFloatFromFN.scala:63:19]
wire [24:0] io_out_c_self_rec_rawIn_2_sig; // @[rawFloatFromFN.scala:63:19]
wire _io_out_c_self_rec_rawIn_out_isNaN_T_4 = ~io_out_c_self_rec_rawIn_isZeroFractIn_2; // @[rawFloatFromFN.scala:49:34, :64:31]
assign _io_out_c_self_rec_rawIn_out_isNaN_T_5 = io_out_c_self_rec_rawIn_isSpecial_2 & _io_out_c_self_rec_rawIn_out_isNaN_T_4; // @[rawFloatFromFN.scala:61:57, :64:{28,31}]
assign io_out_c_self_rec_rawIn_2_isNaN = _io_out_c_self_rec_rawIn_out_isNaN_T_5; // @[rawFloatFromFN.scala:63:19, :64:28]
assign _io_out_c_self_rec_rawIn_out_isInf_T_2 = io_out_c_self_rec_rawIn_isSpecial_2 & io_out_c_self_rec_rawIn_isZeroFractIn_2; // @[rawFloatFromFN.scala:49:34, :61:57, :65:28]
assign io_out_c_self_rec_rawIn_2_isInf = _io_out_c_self_rec_rawIn_out_isInf_T_2; // @[rawFloatFromFN.scala:63:19, :65:28]
assign _io_out_c_self_rec_rawIn_out_sExp_T_5 = {1'h0, _io_out_c_self_rec_rawIn_out_sExp_T_4}; // @[rawFloatFromFN.scala:68:{28,42}]
assign io_out_c_self_rec_rawIn_2_sExp = _io_out_c_self_rec_rawIn_out_sExp_T_5; // @[rawFloatFromFN.scala:63:19, :68:42]
wire _io_out_c_self_rec_rawIn_out_sig_T_8 = ~io_out_c_self_rec_rawIn_isZero_2; // @[rawFloatFromFN.scala:60:30, :70:19]
wire [1:0] _io_out_c_self_rec_rawIn_out_sig_T_9 = {1'h0, _io_out_c_self_rec_rawIn_out_sig_T_8}; // @[rawFloatFromFN.scala:70:{16,19}]
wire [22:0] _io_out_c_self_rec_rawIn_out_sig_T_10 = io_out_c_self_rec_rawIn_isZeroExpIn_2 ? io_out_c_self_rec_rawIn_subnormFract_2 : io_out_c_self_rec_rawIn_fractIn_2; // @[rawFloatFromFN.scala:46:21, :48:30, :52:64, :70:33]
assign _io_out_c_self_rec_rawIn_out_sig_T_11 = {_io_out_c_self_rec_rawIn_out_sig_T_9, _io_out_c_self_rec_rawIn_out_sig_T_10}; // @[rawFloatFromFN.scala:70:{16,27,33}]
assign io_out_c_self_rec_rawIn_2_sig = _io_out_c_self_rec_rawIn_out_sig_T_11; // @[rawFloatFromFN.scala:63:19, :70:27]
wire [2:0] _io_out_c_self_rec_T_16 = io_out_c_self_rec_rawIn_2_sExp[8:6]; // @[recFNFromFN.scala:48:50]
wire [2:0] _io_out_c_self_rec_T_17 = io_out_c_self_rec_rawIn_2_isZero ? 3'h0 : _io_out_c_self_rec_T_16; // @[recFNFromFN.scala:48:{15,50}]
wire [2:0] _io_out_c_self_rec_T_19 = {_io_out_c_self_rec_T_17[2:1], _io_out_c_self_rec_T_17[0] | _io_out_c_self_rec_T_18}; // @[recFNFromFN.scala:48:{15,76}, :49:20]
wire [3:0] _io_out_c_self_rec_T_20 = {io_out_c_self_rec_rawIn_2_sign, _io_out_c_self_rec_T_19}; // @[recFNFromFN.scala:47:20, :48:76]
wire [5:0] _io_out_c_self_rec_T_21 = io_out_c_self_rec_rawIn_2_sExp[5:0]; // @[recFNFromFN.scala:50:23]
wire [9:0] _io_out_c_self_rec_T_22 = {_io_out_c_self_rec_T_20, _io_out_c_self_rec_T_21}; // @[recFNFromFN.scala:47:20, :49:45, :50:23]
wire [22:0] _io_out_c_self_rec_T_23 = io_out_c_self_rec_rawIn_2_sig[22:0]; // @[recFNFromFN.scala:51:22]
wire [32:0] io_out_c_self_rec_2 = {_io_out_c_self_rec_T_22, _io_out_c_self_rec_T_23}; // @[recFNFromFN.scala:49:45, :50:41, :51:22]
wire [7:0] io_out_c_shift_exp_1; // @[Arithmetic.scala:442:29]
wire [6:0] _io_out_c_shift_exp_T_3 = _io_out_c_shift_exp_T_2[6:0]; // @[Arithmetic.scala:443:34]
assign io_out_c_shift_exp_1 = {1'h0, _io_out_c_shift_exp_T_3}; // @[Arithmetic.scala:442:29, :443:{19,34}]
wire [8:0] io_out_c_shift_fn_hi_1 = {1'h0, io_out_c_shift_exp_1}; // @[Arithmetic.scala:442:29, :444:27]
wire [31:0] io_out_c_shift_fn_1 = {io_out_c_shift_fn_hi_1, 23'h0}; // @[Arithmetic.scala:444:27]
wire io_out_c_shift_rec_rawIn_sign_1 = io_out_c_shift_fn_1[31]; // @[rawFloatFromFN.scala:44:18]
wire io_out_c_shift_rec_rawIn_1_sign = io_out_c_shift_rec_rawIn_sign_1; // @[rawFloatFromFN.scala:44:18, :63:19]
wire [7:0] io_out_c_shift_rec_rawIn_expIn_1 = io_out_c_shift_fn_1[30:23]; // @[rawFloatFromFN.scala:45:19]
wire [22:0] io_out_c_shift_rec_rawIn_fractIn_1 = io_out_c_shift_fn_1[22:0]; // @[rawFloatFromFN.scala:46:21]
wire io_out_c_shift_rec_rawIn_isZeroExpIn_1 = io_out_c_shift_rec_rawIn_expIn_1 == 8'h0; // @[rawFloatFromFN.scala:45:19, :48:30]
wire io_out_c_shift_rec_rawIn_isZeroFractIn_1 = io_out_c_shift_rec_rawIn_fractIn_1 == 23'h0; // @[rawFloatFromFN.scala:46:21, :49:34]
wire _io_out_c_shift_rec_rawIn_normDist_T_44 = io_out_c_shift_rec_rawIn_fractIn_1[0]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_45 = io_out_c_shift_rec_rawIn_fractIn_1[1]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_46 = io_out_c_shift_rec_rawIn_fractIn_1[2]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_47 = io_out_c_shift_rec_rawIn_fractIn_1[3]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_48 = io_out_c_shift_rec_rawIn_fractIn_1[4]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_49 = io_out_c_shift_rec_rawIn_fractIn_1[5]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_50 = io_out_c_shift_rec_rawIn_fractIn_1[6]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_51 = io_out_c_shift_rec_rawIn_fractIn_1[7]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_52 = io_out_c_shift_rec_rawIn_fractIn_1[8]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_53 = io_out_c_shift_rec_rawIn_fractIn_1[9]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_54 = io_out_c_shift_rec_rawIn_fractIn_1[10]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_55 = io_out_c_shift_rec_rawIn_fractIn_1[11]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_56 = io_out_c_shift_rec_rawIn_fractIn_1[12]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_57 = io_out_c_shift_rec_rawIn_fractIn_1[13]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_58 = io_out_c_shift_rec_rawIn_fractIn_1[14]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_59 = io_out_c_shift_rec_rawIn_fractIn_1[15]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_60 = io_out_c_shift_rec_rawIn_fractIn_1[16]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_61 = io_out_c_shift_rec_rawIn_fractIn_1[17]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_62 = io_out_c_shift_rec_rawIn_fractIn_1[18]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_63 = io_out_c_shift_rec_rawIn_fractIn_1[19]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_64 = io_out_c_shift_rec_rawIn_fractIn_1[20]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_65 = io_out_c_shift_rec_rawIn_fractIn_1[21]; // @[rawFloatFromFN.scala:46:21]
wire _io_out_c_shift_rec_rawIn_normDist_T_66 = io_out_c_shift_rec_rawIn_fractIn_1[22]; // @[rawFloatFromFN.scala:46:21]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_67 = _io_out_c_shift_rec_rawIn_normDist_T_45 ? 5'h15 : 5'h16; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_68 = _io_out_c_shift_rec_rawIn_normDist_T_46 ? 5'h14 : _io_out_c_shift_rec_rawIn_normDist_T_67; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_69 = _io_out_c_shift_rec_rawIn_normDist_T_47 ? 5'h13 : _io_out_c_shift_rec_rawIn_normDist_T_68; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_70 = _io_out_c_shift_rec_rawIn_normDist_T_48 ? 5'h12 : _io_out_c_shift_rec_rawIn_normDist_T_69; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_71 = _io_out_c_shift_rec_rawIn_normDist_T_49 ? 5'h11 : _io_out_c_shift_rec_rawIn_normDist_T_70; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_72 = _io_out_c_shift_rec_rawIn_normDist_T_50 ? 5'h10 : _io_out_c_shift_rec_rawIn_normDist_T_71; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_73 = _io_out_c_shift_rec_rawIn_normDist_T_51 ? 5'hF : _io_out_c_shift_rec_rawIn_normDist_T_72; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_74 = _io_out_c_shift_rec_rawIn_normDist_T_52 ? 5'hE : _io_out_c_shift_rec_rawIn_normDist_T_73; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_75 = _io_out_c_shift_rec_rawIn_normDist_T_53 ? 5'hD : _io_out_c_shift_rec_rawIn_normDist_T_74; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_76 = _io_out_c_shift_rec_rawIn_normDist_T_54 ? 5'hC : _io_out_c_shift_rec_rawIn_normDist_T_75; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_77 = _io_out_c_shift_rec_rawIn_normDist_T_55 ? 5'hB : _io_out_c_shift_rec_rawIn_normDist_T_76; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_78 = _io_out_c_shift_rec_rawIn_normDist_T_56 ? 5'hA : _io_out_c_shift_rec_rawIn_normDist_T_77; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_79 = _io_out_c_shift_rec_rawIn_normDist_T_57 ? 5'h9 : _io_out_c_shift_rec_rawIn_normDist_T_78; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_80 = _io_out_c_shift_rec_rawIn_normDist_T_58 ? 5'h8 : _io_out_c_shift_rec_rawIn_normDist_T_79; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_81 = _io_out_c_shift_rec_rawIn_normDist_T_59 ? 5'h7 : _io_out_c_shift_rec_rawIn_normDist_T_80; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_82 = _io_out_c_shift_rec_rawIn_normDist_T_60 ? 5'h6 : _io_out_c_shift_rec_rawIn_normDist_T_81; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_83 = _io_out_c_shift_rec_rawIn_normDist_T_61 ? 5'h5 : _io_out_c_shift_rec_rawIn_normDist_T_82; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_84 = _io_out_c_shift_rec_rawIn_normDist_T_62 ? 5'h4 : _io_out_c_shift_rec_rawIn_normDist_T_83; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_85 = _io_out_c_shift_rec_rawIn_normDist_T_63 ? 5'h3 : _io_out_c_shift_rec_rawIn_normDist_T_84; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_86 = _io_out_c_shift_rec_rawIn_normDist_T_64 ? 5'h2 : _io_out_c_shift_rec_rawIn_normDist_T_85; // @[Mux.scala:50:70]
wire [4:0] _io_out_c_shift_rec_rawIn_normDist_T_87 = _io_out_c_shift_rec_rawIn_normDist_T_65 ? 5'h1 : _io_out_c_shift_rec_rawIn_normDist_T_86; // @[Mux.scala:50:70]
wire [4:0] io_out_c_shift_rec_rawIn_normDist_1 = _io_out_c_shift_rec_rawIn_normDist_T_66 ? 5'h0 : _io_out_c_shift_rec_rawIn_normDist_T_87; // @[Mux.scala:50:70]
wire [53:0] _io_out_c_shift_rec_rawIn_subnormFract_T_2 = {31'h0, io_out_c_shift_rec_rawIn_fractIn_1} << io_out_c_shift_rec_rawIn_normDist_1; // @[Mux.scala:50:70]
wire [21:0] _io_out_c_shift_rec_rawIn_subnormFract_T_3 = _io_out_c_shift_rec_rawIn_subnormFract_T_2[21:0]; // @[rawFloatFromFN.scala:52:{33,46}]
wire [22:0] io_out_c_shift_rec_rawIn_subnormFract_1 = {_io_out_c_shift_rec_rawIn_subnormFract_T_3, 1'h0}; // @[rawFloatFromFN.scala:52:{46,64}]
wire [8:0] _io_out_c_shift_rec_rawIn_adjustedExp_T_5 = {4'hF, ~io_out_c_shift_rec_rawIn_normDist_1}; // @[Mux.scala:50:70]
wire [8:0] _io_out_c_shift_rec_rawIn_adjustedExp_T_6 = io_out_c_shift_rec_rawIn_isZeroExpIn_1 ? _io_out_c_shift_rec_rawIn_adjustedExp_T_5 : {1'h0, io_out_c_shift_rec_rawIn_expIn_1}; // @[rawFloatFromFN.scala:45:19, :48:30, :54:10, :55:18]
wire [1:0] _io_out_c_shift_rec_rawIn_adjustedExp_T_7 = io_out_c_shift_rec_rawIn_isZeroExpIn_1 ? 2'h2 : 2'h1; // @[rawFloatFromFN.scala:48:30, :58:14]
wire [7:0] _io_out_c_shift_rec_rawIn_adjustedExp_T_8 = {6'h20, _io_out_c_shift_rec_rawIn_adjustedExp_T_7}; // @[rawFloatFromFN.scala:58:{9,14}]
wire [9:0] _io_out_c_shift_rec_rawIn_adjustedExp_T_9 = {1'h0, _io_out_c_shift_rec_rawIn_adjustedExp_T_6} + {2'h0, _io_out_c_shift_rec_rawIn_adjustedExp_T_8}; // @[rawFloatFromFN.scala:54:10, :57:9, :58:9]
wire [8:0] io_out_c_shift_rec_rawIn_adjustedExp_1 = _io_out_c_shift_rec_rawIn_adjustedExp_T_9[8:0]; // @[rawFloatFromFN.scala:57:9]
wire [8:0] _io_out_c_shift_rec_rawIn_out_sExp_T_2 = io_out_c_shift_rec_rawIn_adjustedExp_1; // @[rawFloatFromFN.scala:57:9, :68:28]
wire io_out_c_shift_rec_rawIn_isZero_1 = io_out_c_shift_rec_rawIn_isZeroExpIn_1 & io_out_c_shift_rec_rawIn_isZeroFractIn_1; // @[rawFloatFromFN.scala:48:30, :49:34, :60:30]
wire io_out_c_shift_rec_rawIn_1_isZero = io_out_c_shift_rec_rawIn_isZero_1; // @[rawFloatFromFN.scala:60:30, :63:19]
wire [1:0] _io_out_c_shift_rec_rawIn_isSpecial_T_1 = io_out_c_shift_rec_rawIn_adjustedExp_1[8:7]; // @[rawFloatFromFN.scala:57:9, :61:32]
wire io_out_c_shift_rec_rawIn_isSpecial_1 = &_io_out_c_shift_rec_rawIn_isSpecial_T_1; // @[rawFloatFromFN.scala:61:{32,57}]
wire _io_out_c_shift_rec_rawIn_out_isNaN_T_3; // @[rawFloatFromFN.scala:64:28]
wire _io_out_c_shift_rec_rawIn_out_isInf_T_1; // @[rawFloatFromFN.scala:65:28]
wire _io_out_c_shift_rec_T_10 = io_out_c_shift_rec_rawIn_1_isNaN; // @[recFNFromFN.scala:49:20]
wire [9:0] _io_out_c_shift_rec_rawIn_out_sExp_T_3; // @[rawFloatFromFN.scala:68:42]
wire [24:0] _io_out_c_shift_rec_rawIn_out_sig_T_7; // @[rawFloatFromFN.scala:70:27]
wire io_out_c_shift_rec_rawIn_1_isInf; // @[rawFloatFromFN.scala:63:19]
wire [9:0] io_out_c_shift_rec_rawIn_1_sExp; // @[rawFloatFromFN.scala:63:19]
wire [24:0] io_out_c_shift_rec_rawIn_1_sig; // @[rawFloatFromFN.scala:63:19]
wire _io_out_c_shift_rec_rawIn_out_isNaN_T_2 = ~io_out_c_shift_rec_rawIn_isZeroFractIn_1; // @[rawFloatFromFN.scala:49:34, :64:31]
assign _io_out_c_shift_rec_rawIn_out_isNaN_T_3 = io_out_c_shift_rec_rawIn_isSpecial_1 & _io_out_c_shift_rec_rawIn_out_isNaN_T_2; // @[rawFloatFromFN.scala:61:57, :64:{28,31}]
assign io_out_c_shift_rec_rawIn_1_isNaN = _io_out_c_shift_rec_rawIn_out_isNaN_T_3; // @[rawFloatFromFN.scala:63:19, :64:28]
assign _io_out_c_shift_rec_rawIn_out_isInf_T_1 = io_out_c_shift_rec_rawIn_isSpecial_1 & io_out_c_shift_rec_rawIn_isZeroFractIn_1; // @[rawFloatFromFN.scala:49:34, :61:57, :65:28]
assign io_out_c_shift_rec_rawIn_1_isInf = _io_out_c_shift_rec_rawIn_out_isInf_T_1; // @[rawFloatFromFN.scala:63:19, :65:28]
assign _io_out_c_shift_rec_rawIn_out_sExp_T_3 = {1'h0, _io_out_c_shift_rec_rawIn_out_sExp_T_2}; // @[rawFloatFromFN.scala:68:{28,42}]
assign io_out_c_shift_rec_rawIn_1_sExp = _io_out_c_shift_rec_rawIn_out_sExp_T_3; // @[rawFloatFromFN.scala:63:19, :68:42]
wire _io_out_c_shift_rec_rawIn_out_sig_T_4 = ~io_out_c_shift_rec_rawIn_isZero_1; // @[rawFloatFromFN.scala:60:30, :70:19]
wire [1:0] _io_out_c_shift_rec_rawIn_out_sig_T_5 = {1'h0, _io_out_c_shift_rec_rawIn_out_sig_T_4}; // @[rawFloatFromFN.scala:70:{16,19}]
wire [22:0] _io_out_c_shift_rec_rawIn_out_sig_T_6 = io_out_c_shift_rec_rawIn_isZeroExpIn_1 ? io_out_c_shift_rec_rawIn_subnormFract_1 : io_out_c_shift_rec_rawIn_fractIn_1; // @[rawFloatFromFN.scala:46:21, :48:30, :52:64, :70:33]
assign _io_out_c_shift_rec_rawIn_out_sig_T_7 = {_io_out_c_shift_rec_rawIn_out_sig_T_5, _io_out_c_shift_rec_rawIn_out_sig_T_6}; // @[rawFloatFromFN.scala:70:{16,27,33}]
assign io_out_c_shift_rec_rawIn_1_sig = _io_out_c_shift_rec_rawIn_out_sig_T_7; // @[rawFloatFromFN.scala:63:19, :70:27]
wire [2:0] _io_out_c_shift_rec_T_8 = io_out_c_shift_rec_rawIn_1_sExp[8:6]; // @[recFNFromFN.scala:48:50]
wire [2:0] _io_out_c_shift_rec_T_9 = io_out_c_shift_rec_rawIn_1_isZero ? 3'h0 : _io_out_c_shift_rec_T_8; // @[recFNFromFN.scala:48:{15,50}]
wire [2:0] _io_out_c_shift_rec_T_11 = {_io_out_c_shift_rec_T_9[2:1], _io_out_c_shift_rec_T_9[0] | _io_out_c_shift_rec_T_10}; // @[recFNFromFN.scala:48:{15,76}, :49:20]
wire [3:0] _io_out_c_shift_rec_T_12 = {io_out_c_shift_rec_rawIn_1_sign, _io_out_c_shift_rec_T_11}; // @[recFNFromFN.scala:47:20, :48:76]
wire [5:0] _io_out_c_shift_rec_T_13 = io_out_c_shift_rec_rawIn_1_sExp[5:0]; // @[recFNFromFN.scala:50:23]
wire [9:0] _io_out_c_shift_rec_T_14 = {_io_out_c_shift_rec_T_12, _io_out_c_shift_rec_T_13}; // @[recFNFromFN.scala:47:20, :49:45, :50:23]
wire [22:0] _io_out_c_shift_rec_T_15 = io_out_c_shift_rec_rawIn_1_sig[22:0]; // @[recFNFromFN.scala:51:22]
wire [32:0] io_out_c_shift_rec_1 = {_io_out_c_shift_rec_T_14, _io_out_c_shift_rec_T_15}; // @[recFNFromFN.scala:49:45, :50:41, :51:22]
wire _io_out_c_T_4 = |io_out_c_shift_exp_1; // @[Arithmetic.scala:442:29, :447:26]
wire _io_out_c_T_6 = ~_io_out_c_T_5; // @[Arithmetic.scala:447:15]
wire _io_out_c_T_7 = ~_io_out_c_T_4; // @[Arithmetic.scala:447:{15,26}] |
Generate the Verilog code corresponding to the following Chisel files.
File BankBinder.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressSet, TransferSizes}
case class BankBinderNode(mask: BigInt)(implicit valName: ValName) extends TLCustomNode
{
private val bit = mask & -mask
val maxXfer = TransferSizes(1, if (bit == 0 || bit > 4096) 4096 else bit.toInt)
val ids = AddressSet.enumerateMask(mask)
def resolveStar(iKnown: Int, oKnown: Int, iStars: Int, oStars: Int): (Int, Int) = {
val ports = ids.size
val oStar = if (oStars == 0) 0 else (ports - oKnown) / oStars
val iStar = if (iStars == 0) 0 else (ports - iKnown) / iStars
require (ports == iKnown + iStar*iStars, s"${name} must have ${ports} inputs, but has ${iKnown} + ${iStar}*${iStars} (at ${lazyModule.line})")
require (ports == oKnown + oStar*oStars, s"${name} must have ${ports} outputs, but has ${oKnown} + ${oStar}*${oStars} (at ${lazyModule.line})")
(iStar, oStar)
}
def mapParamsD(n: Int, p: Seq[TLMasterPortParameters]): Seq[TLMasterPortParameters] =
(p zip ids) map { case (cp, id) => cp.v1copy(clients = cp.clients.map { c => c.v1copy(
visibility = c.visibility.flatMap { a => a.intersect(AddressSet(id, ~mask))},
supportsProbe = c.supports.probe intersect maxXfer,
supportsArithmetic = c.supports.arithmetic intersect maxXfer,
supportsLogical = c.supports.logical intersect maxXfer,
supportsGet = c.supports.get intersect maxXfer,
supportsPutFull = c.supports.putFull intersect maxXfer,
supportsPutPartial = c.supports.putPartial intersect maxXfer,
supportsHint = c.supports.hint intersect maxXfer)})}
def mapParamsU(n: Int, p: Seq[TLSlavePortParameters]): Seq[TLSlavePortParameters] =
(p zip ids) map { case (mp, id) => mp.v1copy(managers = mp.managers.flatMap { m =>
val addresses = m.address.flatMap(a => a.intersect(AddressSet(id, ~mask)))
if (addresses.nonEmpty)
Some(m.v1copy(
address = addresses,
supportsAcquireT = m.supportsAcquireT intersect maxXfer,
supportsAcquireB = m.supportsAcquireB intersect maxXfer,
supportsArithmetic = m.supportsArithmetic intersect maxXfer,
supportsLogical = m.supportsLogical intersect maxXfer,
supportsGet = m.supportsGet intersect maxXfer,
supportsPutFull = m.supportsPutFull intersect maxXfer,
supportsPutPartial = m.supportsPutPartial intersect maxXfer,
supportsHint = m.supportsHint intersect maxXfer))
else None
})}
}
/* A BankBinder is used to divide contiguous memory regions into banks, suitable for a cache */
class BankBinder(mask: BigInt)(implicit p: Parameters) extends LazyModule
{
val node = BankBinderNode(mask)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out <> in
}
}
}
object BankBinder
{
def apply(mask: BigInt)(implicit p: Parameters): TLNode = {
val binder = LazyModule(new BankBinder(mask))
binder.node
}
def apply(nBanks: Int, granularity: Int)(implicit p: Parameters): TLNode = {
if (nBanks > 0) apply(granularity * (nBanks-1))
else TLTempNode()
}
}
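// A self-contained sketch (plain Scala, not rocket-chip code) of how the mask computed by
// BankBinder(nBanks, granularity) above partitions addresses: for four banks of 0x1000 bytes
// the mask is granularity * (nBanks - 1) = 0x3000, the enumerated ids are the multiples of
// 0x1000 up to the mask, and an address is serviced by the bank whose id equals (addr & mask).
object BankMaskSketch extends App {
  val nBanks = 4
  val granularity = 0x1000
  val mask = BigInt(granularity) * (nBanks - 1)                       // 0x3000
  val ids = (BigInt(0) to mask).filter(i => (i & ~mask) == BigInt(0)) // 0x0, 0x1000, 0x2000, 0x3000
  def bankOf(addr: BigInt): BigInt = addr & mask
  println(ids.map(_.toString(16)).mkString(", "))
  println(bankOf(BigInt("2345", 16)).toString(16))                    // 2000 -> the third bank
}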
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** instantiate this [[LazyModule]], return [[AutoBundle]] and the unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
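// A minimal sketch of the two-phase pattern described above, mirroring the BankBinder Impl
// earlier in this file set and assuming the usual cde/diplomacy/tilelink imports are in scope:
// the outer LazyModule only participates in parameter negotiation, while all hardware lives
// in the nested LazyModuleImp, which is the only place node.in / node.out may be touched.
class PassThrough(implicit p: Parameters) extends LazyModule {
  val node = TLIdentityNode()
  lazy val module = new Impl
  class Impl extends LazyModuleImp(this) {
    (node.in zip node.out).foreach { case ((in, _), (out, _)) => out <> in }
  }
}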
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
| module BankBinder( // @[BankBinder.scala:61:9]
input clock, // @[BankBinder.scala:61:9]
input reset, // @[BankBinder.scala:61:9]
output auto_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_corrupt // @[LazyModuleImp.scala:107:25]
);
TLMonitor_46 monitor ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (auto_out_a_ready),
.io_in_a_valid (auto_in_a_valid),
.io_in_a_bits_opcode (auto_in_a_bits_opcode),
.io_in_a_bits_param (auto_in_a_bits_param),
.io_in_a_bits_size (auto_in_a_bits_size),
.io_in_a_bits_source (auto_in_a_bits_source),
.io_in_a_bits_address (auto_in_a_bits_address),
.io_in_a_bits_mask (auto_in_a_bits_mask),
.io_in_a_bits_corrupt (auto_in_a_bits_corrupt),
.io_in_d_ready (auto_in_d_ready),
.io_in_d_valid (auto_out_d_valid),
.io_in_d_bits_opcode (auto_out_d_bits_opcode),
.io_in_d_bits_param (auto_out_d_bits_param),
.io_in_d_bits_size (auto_out_d_bits_size),
.io_in_d_bits_source (auto_out_d_bits_source),
.io_in_d_bits_sink (auto_out_d_bits_sink),
.io_in_d_bits_denied (auto_out_d_bits_denied),
.io_in_d_bits_corrupt (auto_out_d_bits_corrupt)
); // @[Nodes.scala:27:25]
assign auto_in_a_ready = auto_out_a_ready; // @[BankBinder.scala:61:9]
assign auto_in_d_valid = auto_out_d_valid; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_opcode = auto_out_d_bits_opcode; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_param = auto_out_d_bits_param; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_size = auto_out_d_bits_size; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_source = auto_out_d_bits_source; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_denied = auto_out_d_bits_denied; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_data = auto_out_d_bits_data; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_corrupt = auto_out_d_bits_corrupt; // @[BankBinder.scala:61:9]
assign auto_out_a_valid = auto_in_a_valid; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_opcode = auto_in_a_bits_opcode; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_param = auto_in_a_bits_param; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_size = auto_in_a_bits_size; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_source = auto_in_a_bits_source; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_address = auto_in_a_bits_address; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_mask = auto_in_a_bits_mask; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_data = auto_in_a_bits_data; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_corrupt = auto_in_a_bits_corrupt; // @[BankBinder.scala:61:9]
assign auto_out_d_ready = auto_in_d_ready; // @[BankBinder.scala:61:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
  * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
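// For reference, a minimal sketch (assuming a plain Chisel3 module) of what ShiftRegInit
// above unrolls into for n = 2: the register named "<name>_1" samples the input and
// "<name>_0" drives the output, each reset to the supplied init value.
class TwoStageSync extends Module {
  val io = IO(new Bundle { val d = Input(Bool()); val q = Output(Bool()) })
  val sync_1 = RegNext(io.d,   false.B)   // stage fed by the input (i = n-1)
  val sync_0 = RegNext(sync_1, false.B)   // stage driving the output (i = 0)
  io.q := sync_0
}
// Behaviourally the same as: io.q := ShiftRegInit(io.d, n = 2, init = false.B, name = Some("sync"))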
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
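// A quick check (ordinary Scala, using the case class above) of the derived widths the rest
// of this file relies on: bits sizes the Gray pointers and wires sizes the data path that
// crosses the clock boundary.
object AsyncQueueParamsCheck extends App {
  val p0 = AsyncQueueParams()                  // depth = 8, sync = 3, safe = true, narrow = false
  assert(p0.bits == 3 && p0.wires == 8)        // 4-bit Gray pointers, all 8 entries cross over
  val p1 = AsyncQueueParams(narrow = true)
  assert(p1.bits == 3 && p1.wires == 1)        // read mux moved to the source side: one data wire
  val p2 = AsyncQueueParams.singleton()
  assert(p2.depth == 1 && p2.bits == 0)        // single-entry queue: pointers degenerate to 1-bit toggles
}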
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
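// The point of the Gray encoding returned above is that consecutive pointer values differ in
// exactly one bit, so a pointer sampled mid-transition in the other clock domain is still
// either the old or the new value, never garbage. A plain-Scala check of that property
// (commentary only, not part of the library):
object GrayStepCheck extends App {
  def toGray(b: Int): Int = b ^ (b >> 1)
  val codes   = (0 until 8).map(toGray)                        // 0, 1, 3, 2, 6, 7, 5, 4
  val changed = codes.zip(codes.tail :+ codes.head)            // include the wrap-around step
    .map { case (a, b) => Integer.bitCount(a ^ b) }
  assert(changed.forall(_ == 1))                               // every step flips exactly one bit
}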
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
// Impossible to write because dequeue can occur on the receiving side,
    // then reset is allowed to happen, but the write side cannot know that the dequeue
// occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
  // The register only latches when the selected value is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
| module AsyncValidSync_41( // @[AsyncQueue.scala:58:7]
input io_in, // @[AsyncQueue.scala:59:14]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_54 io_out_source_valid_1 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_d (io_in_0), // @[AsyncQueue.scala:58:7]
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
  * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
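// A minimal usage sketch of the helper above (hypothetical signal names, to be used from
// inside a Chisel module): the value is flattened to a UInt, pushed through a depth-3
// asynchronously reset chain, and viewed as its original type again on the way out.
val rawLevel  = Wire(UInt(2.W))                 // slow, asynchronous input
val pipeLevel = AsyncResetShiftReg(rawLevel, depth = 3, init = 0, name = Some("level_pipe"))
// pipeLevel has the same Chisel type as rawLevel; the three AsyncResetRegVec stages are
// what a backend CDC flow can later recognise and constrain.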
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
// Impossible to write because dequeue can occur on the receiving side,
    // then reset is allowed to happen, but the write side cannot know that the dequeue
// occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
  // The register only latches when the selected value is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
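// Note on the index decode used by both AsyncQueueSource and AsyncQueueSink above: it drops
// the pointer's wrap bit and folds it back into the top address bit, which works out to the
// bits-wide Gray code of the count modulo the depth. Both sides therefore address the same
// slot, and the slot address still changes by one bit per push or pop. A plain-Scala check
// for the default depth of 8 (bits = 3), commentary only:
object IndexDecodeCheck extends App {
  def toGray(b: Int): Int = b ^ (b >> 1)
  def slot(grayPtr: Int, bits: Int = 3): Int =   // mirrors ridx(bits-1,0) ^ (ridx(bits) << (bits-1))
    (grayPtr & ((1 << bits) - 1)) ^ (((grayPtr >> bits) & 1) << (bits - 1))
  (0 until 16).foreach { b => assert(slot(toGray(b)) == toGray(b % 8)) }
}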
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
| module AsyncValidSync_147( // @[AsyncQueue.scala:58:7]
input io_in, // @[AsyncQueue.scala:59:14]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_157 io_out_sink_valid ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_d (io_in_0), // @[AsyncQueue.scala:58:7]
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Buffer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.BufferParams
class TLBufferNode (
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit valName: ValName) extends TLAdapterNode(
clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) },
managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) }
) {
override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}"
override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none)
}
class TLBuffer(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters) extends LazyModule
{
def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace)
def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde)
def this()(implicit p: Parameters) = this(BufferParams.default)
val node = new TLBufferNode(a, b, c, d, e)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def headBundle = node.out.head._2.bundle
override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.a <> a(in .a)
in .d <> d(out.d)
if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
in .b <> b(out.b)
out.c <> c(in .c)
out.e <> e(in .e)
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLBuffer
{
def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default)
def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde)
def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace)
def apply(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters): TLNode =
{
val buffer = LazyModule(new TLBuffer(a, b, c, d, e))
buffer.node
}
def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = {
val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) }
name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } }
buffers.map(_.node)
}
def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = {
chain(depth, name)
.reduceLeftOption(_ :*=* _)
.getOrElse(TLNameNode("no_buffer"))
}
}
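// A hypothetical wiring sketch (node names are illustrative, not from this file) of the two
// common ways the factories above are used inside a LazyModule: a single default buffer
// stage on an edge, or a named chain of stages collapsed into one node.
xbar.node := TLBuffer() := core.node                                        // one BufferParams.default stage
mem.node  := TLBuffer.chainNode(2, Some("outer_buffers")) := xbar.node      // buffers named outer_buffers_0 / _1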
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** Instantiate this [[LazyModule]], returning its [[AutoBundle]] and the unconnected [[Dangle]]s from this module and
    * its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
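// Illustrative sketch (not part of the original file): a LazyRawModuleImp subclass
// that forwards an explicit clock/reset pair to its lazy children through
// childClock/childReset. The io_clock/io_reset port names and the class name are
// hypothetical; overriding provideImplicitClockToLazyChildren makes instantiate()
// run under withClockAndReset(childClock, childReset).
class ClockedRawImpExample(wrapper: LazyModule) extends LazyRawModuleImp(wrapper) {
  override def provideImplicitClockToLazyChildren: Boolean = true
  val io_clock = IO(Input(Clock()))
  val io_reset = IO(Input(Reset()))
  // Last-connect semantics override the disabled defaults driven in LazyRawModuleImp.
  childClock := io_clock
  childReset := io_reset
}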
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
 * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]],
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
 * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package; all nodes are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
 * @tparam DI
 *   Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
 *   describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
 *   [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
 *   parameters.
 * @tparam UI
 *   Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
 *   the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
 * @tparam EI
 *   Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
 *   specified for a sink according to the protocol.
 * @tparam BI
 *   Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
 *   It should extend from [[chisel3.Data]], which represents the real hardware.
 * @tparam DO
 *   Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
 *   describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
 * @tparam UO
 *   Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
 *   the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
 *   Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
 * @tparam EO
 *   Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
 *   specified for a source according to the protocol.
 * @tparam BO
 *   Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
 *   interface. It should extend from [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
 *   - line `─`: source is processed by a function and the generated result is passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
   * Given counts of known inward and outward bindings and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
  /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uoParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
    /** Determine which [[BaseNode]]s are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
  /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
    * connections which need to be resolved in some way to determine how many actual edges they correspond to. We also
    * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
    * edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
    * If you need access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
    // TODO: Unconnected forwarded diplomatic signals are DontCare'd here for compatibility reasons.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
    // TODO: Unconnected forwarded diplomatic signals are DontCare'd here for compatibility reasons.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
    * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
    * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
module TLBuffer_a29d64s8k1z4u( // @[Buffer.scala:40:9]
input clock, // @[Buffer.scala:40:9]
input reset, // @[Buffer.scala:40:9]
output auto_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [28:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [28:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_corrupt // @[LazyModuleImp.scala:107:25]
);
wire auto_in_a_valid_0 = auto_in_a_valid; // @[Buffer.scala:40:9]
wire [2:0] auto_in_a_bits_opcode_0 = auto_in_a_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] auto_in_a_bits_param_0 = auto_in_a_bits_param; // @[Buffer.scala:40:9]
wire [3:0] auto_in_a_bits_size_0 = auto_in_a_bits_size; // @[Buffer.scala:40:9]
wire [7:0] auto_in_a_bits_source_0 = auto_in_a_bits_source; // @[Buffer.scala:40:9]
wire [28:0] auto_in_a_bits_address_0 = auto_in_a_bits_address; // @[Buffer.scala:40:9]
wire [7:0] auto_in_a_bits_mask_0 = auto_in_a_bits_mask; // @[Buffer.scala:40:9]
wire [63:0] auto_in_a_bits_data_0 = auto_in_a_bits_data; // @[Buffer.scala:40:9]
wire auto_in_a_bits_corrupt_0 = auto_in_a_bits_corrupt; // @[Buffer.scala:40:9]
wire auto_in_d_ready_0 = auto_in_d_ready; // @[Buffer.scala:40:9]
wire auto_out_a_ready_0 = auto_out_a_ready; // @[Buffer.scala:40:9]
wire auto_out_d_valid_0 = auto_out_d_valid; // @[Buffer.scala:40:9]
wire [2:0] auto_out_d_bits_opcode_0 = auto_out_d_bits_opcode; // @[Buffer.scala:40:9]
wire [1:0] auto_out_d_bits_param_0 = auto_out_d_bits_param; // @[Buffer.scala:40:9]
wire [3:0] auto_out_d_bits_size_0 = auto_out_d_bits_size; // @[Buffer.scala:40:9]
wire [7:0] auto_out_d_bits_source_0 = auto_out_d_bits_source; // @[Buffer.scala:40:9]
wire auto_out_d_bits_sink_0 = auto_out_d_bits_sink; // @[Buffer.scala:40:9]
wire auto_out_d_bits_denied_0 = auto_out_d_bits_denied; // @[Buffer.scala:40:9]
wire [63:0] auto_out_d_bits_data_0 = auto_out_d_bits_data; // @[Buffer.scala:40:9]
wire auto_out_d_bits_corrupt_0 = auto_out_d_bits_corrupt; // @[Buffer.scala:40:9]
wire nodeIn_a_ready; // @[MixedNode.scala:551:17]
wire nodeIn_a_valid = auto_in_a_valid_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_a_bits_opcode = auto_in_a_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_a_bits_param = auto_in_a_bits_param_0; // @[Buffer.scala:40:9]
wire [3:0] nodeIn_a_bits_size = auto_in_a_bits_size_0; // @[Buffer.scala:40:9]
wire [7:0] nodeIn_a_bits_source = auto_in_a_bits_source_0; // @[Buffer.scala:40:9]
wire [28:0] nodeIn_a_bits_address = auto_in_a_bits_address_0; // @[Buffer.scala:40:9]
wire [7:0] nodeIn_a_bits_mask = auto_in_a_bits_mask_0; // @[Buffer.scala:40:9]
wire [63:0] nodeIn_a_bits_data = auto_in_a_bits_data_0; // @[Buffer.scala:40:9]
wire nodeIn_a_bits_corrupt = auto_in_a_bits_corrupt_0; // @[Buffer.scala:40:9]
wire nodeIn_d_ready = auto_in_d_ready_0; // @[Buffer.scala:40:9]
wire nodeIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] nodeIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [3:0] nodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [7:0] nodeIn_d_bits_source; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] nodeIn_d_bits_data; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire nodeOut_a_ready = auto_out_a_ready_0; // @[Buffer.scala:40:9]
wire nodeOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [28:0] nodeOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] nodeOut_a_bits_data; // @[MixedNode.scala:542:17]
wire nodeOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire nodeOut_d_ready; // @[MixedNode.scala:542:17]
wire nodeOut_d_valid = auto_out_d_valid_0; // @[Buffer.scala:40:9]
wire [2:0] nodeOut_d_bits_opcode = auto_out_d_bits_opcode_0; // @[Buffer.scala:40:9]
wire [1:0] nodeOut_d_bits_param = auto_out_d_bits_param_0; // @[Buffer.scala:40:9]
wire [3:0] nodeOut_d_bits_size = auto_out_d_bits_size_0; // @[Buffer.scala:40:9]
wire [7:0] nodeOut_d_bits_source = auto_out_d_bits_source_0; // @[Buffer.scala:40:9]
wire nodeOut_d_bits_sink = auto_out_d_bits_sink_0; // @[Buffer.scala:40:9]
wire nodeOut_d_bits_denied = auto_out_d_bits_denied_0; // @[Buffer.scala:40:9]
wire [63:0] nodeOut_d_bits_data = auto_out_d_bits_data_0; // @[Buffer.scala:40:9]
wire nodeOut_d_bits_corrupt = auto_out_d_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_in_a_ready_0; // @[Buffer.scala:40:9]
wire [2:0] auto_in_d_bits_opcode_0; // @[Buffer.scala:40:9]
wire [1:0] auto_in_d_bits_param_0; // @[Buffer.scala:40:9]
wire [3:0] auto_in_d_bits_size_0; // @[Buffer.scala:40:9]
wire [7:0] auto_in_d_bits_source_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_sink_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_denied_0; // @[Buffer.scala:40:9]
wire [63:0] auto_in_d_bits_data_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_in_d_valid_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_a_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_a_bits_param_0; // @[Buffer.scala:40:9]
wire [3:0] auto_out_a_bits_size_0; // @[Buffer.scala:40:9]
wire [7:0] auto_out_a_bits_source_0; // @[Buffer.scala:40:9]
wire [28:0] auto_out_a_bits_address_0; // @[Buffer.scala:40:9]
wire [7:0] auto_out_a_bits_mask_0; // @[Buffer.scala:40:9]
wire [63:0] auto_out_a_bits_data_0; // @[Buffer.scala:40:9]
wire auto_out_a_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_out_a_valid_0; // @[Buffer.scala:40:9]
wire auto_out_d_ready_0; // @[Buffer.scala:40:9]
assign auto_in_a_ready_0 = nodeIn_a_ready; // @[Buffer.scala:40:9]
assign auto_in_d_valid_0 = nodeIn_d_valid; // @[Buffer.scala:40:9]
assign auto_in_d_bits_opcode_0 = nodeIn_d_bits_opcode; // @[Buffer.scala:40:9]
assign auto_in_d_bits_param_0 = nodeIn_d_bits_param; // @[Buffer.scala:40:9]
assign auto_in_d_bits_size_0 = nodeIn_d_bits_size; // @[Buffer.scala:40:9]
assign auto_in_d_bits_source_0 = nodeIn_d_bits_source; // @[Buffer.scala:40:9]
assign auto_in_d_bits_sink_0 = nodeIn_d_bits_sink; // @[Buffer.scala:40:9]
assign auto_in_d_bits_denied_0 = nodeIn_d_bits_denied; // @[Buffer.scala:40:9]
assign auto_in_d_bits_data_0 = nodeIn_d_bits_data; // @[Buffer.scala:40:9]
assign auto_in_d_bits_corrupt_0 = nodeIn_d_bits_corrupt; // @[Buffer.scala:40:9]
assign auto_out_a_valid_0 = nodeOut_a_valid; // @[Buffer.scala:40:9]
assign auto_out_a_bits_opcode_0 = nodeOut_a_bits_opcode; // @[Buffer.scala:40:9]
assign auto_out_a_bits_param_0 = nodeOut_a_bits_param; // @[Buffer.scala:40:9]
assign auto_out_a_bits_size_0 = nodeOut_a_bits_size; // @[Buffer.scala:40:9]
assign auto_out_a_bits_source_0 = nodeOut_a_bits_source; // @[Buffer.scala:40:9]
assign auto_out_a_bits_address_0 = nodeOut_a_bits_address; // @[Buffer.scala:40:9]
assign auto_out_a_bits_mask_0 = nodeOut_a_bits_mask; // @[Buffer.scala:40:9]
assign auto_out_a_bits_data_0 = nodeOut_a_bits_data; // @[Buffer.scala:40:9]
assign auto_out_a_bits_corrupt_0 = nodeOut_a_bits_corrupt; // @[Buffer.scala:40:9]
assign auto_out_d_ready_0 = nodeOut_d_ready; // @[Buffer.scala:40:9]
TLMonitor_22 monitor ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (nodeIn_a_ready), // @[MixedNode.scala:551:17]
.io_in_a_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17]
.io_in_a_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_a_bits_param (nodeIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_in_a_bits_size (nodeIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_in_a_bits_source (nodeIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_in_a_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_in_a_bits_mask (nodeIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_in_a_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_in_a_bits_corrupt (nodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_in_d_ready (nodeIn_d_ready), // @[MixedNode.scala:551:17]
.io_in_d_valid (nodeIn_d_valid), // @[MixedNode.scala:551:17]
.io_in_d_bits_opcode (nodeIn_d_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_d_bits_param (nodeIn_d_bits_param), // @[MixedNode.scala:551:17]
.io_in_d_bits_size (nodeIn_d_bits_size), // @[MixedNode.scala:551:17]
.io_in_d_bits_source (nodeIn_d_bits_source), // @[MixedNode.scala:551:17]
.io_in_d_bits_sink (nodeIn_d_bits_sink), // @[MixedNode.scala:551:17]
.io_in_d_bits_denied (nodeIn_d_bits_denied), // @[MixedNode.scala:551:17]
.io_in_d_bits_data (nodeIn_d_bits_data), // @[MixedNode.scala:551:17]
.io_in_d_bits_corrupt (nodeIn_d_bits_corrupt) // @[MixedNode.scala:551:17]
); // @[Nodes.scala:27:25]
Queue2_TLBundleA_a29d64s8k1z4u nodeOut_a_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (nodeIn_a_ready),
.io_enq_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17]
.io_enq_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_enq_bits_param (nodeIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_enq_bits_size (nodeIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_enq_bits_source (nodeIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_enq_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_enq_bits_mask (nodeIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_enq_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_enq_bits_corrupt (nodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_deq_ready (nodeOut_a_ready), // @[MixedNode.scala:542:17]
.io_deq_valid (nodeOut_a_valid),
.io_deq_bits_opcode (nodeOut_a_bits_opcode),
.io_deq_bits_param (nodeOut_a_bits_param),
.io_deq_bits_size (nodeOut_a_bits_size),
.io_deq_bits_source (nodeOut_a_bits_source),
.io_deq_bits_address (nodeOut_a_bits_address),
.io_deq_bits_mask (nodeOut_a_bits_mask),
.io_deq_bits_data (nodeOut_a_bits_data),
.io_deq_bits_corrupt (nodeOut_a_bits_corrupt)
); // @[Decoupled.scala:362:21]
Queue2_TLBundleD_a29d64s8k1z4u nodeIn_d_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (nodeOut_d_ready),
.io_enq_valid (nodeOut_d_valid), // @[MixedNode.scala:542:17]
.io_enq_bits_opcode (nodeOut_d_bits_opcode), // @[MixedNode.scala:542:17]
.io_enq_bits_param (nodeOut_d_bits_param), // @[MixedNode.scala:542:17]
.io_enq_bits_size (nodeOut_d_bits_size), // @[MixedNode.scala:542:17]
.io_enq_bits_source (nodeOut_d_bits_source), // @[MixedNode.scala:542:17]
.io_enq_bits_sink (nodeOut_d_bits_sink), // @[MixedNode.scala:542:17]
.io_enq_bits_denied (nodeOut_d_bits_denied), // @[MixedNode.scala:542:17]
.io_enq_bits_data (nodeOut_d_bits_data), // @[MixedNode.scala:542:17]
.io_enq_bits_corrupt (nodeOut_d_bits_corrupt), // @[MixedNode.scala:542:17]
.io_deq_ready (nodeIn_d_ready), // @[MixedNode.scala:551:17]
.io_deq_valid (nodeIn_d_valid),
.io_deq_bits_opcode (nodeIn_d_bits_opcode),
.io_deq_bits_param (nodeIn_d_bits_param),
.io_deq_bits_size (nodeIn_d_bits_size),
.io_deq_bits_source (nodeIn_d_bits_source),
.io_deq_bits_sink (nodeIn_d_bits_sink),
.io_deq_bits_denied (nodeIn_d_bits_denied),
.io_deq_bits_data (nodeIn_d_bits_data),
.io_deq_bits_corrupt (nodeIn_d_bits_corrupt)
); // @[Decoupled.scala:362:21]
assign auto_in_a_ready = auto_in_a_ready_0; // @[Buffer.scala:40:9]
assign auto_in_d_valid = auto_in_d_valid_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_opcode = auto_in_d_bits_opcode_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_param = auto_in_d_bits_param_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_size = auto_in_d_bits_size_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_source = auto_in_d_bits_source_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_sink = auto_in_d_bits_sink_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_denied = auto_in_d_bits_denied_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_data = auto_in_d_bits_data_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_corrupt = auto_in_d_bits_corrupt_0; // @[Buffer.scala:40:9]
assign auto_out_a_valid = auto_out_a_valid_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_opcode = auto_out_a_bits_opcode_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_param = auto_out_a_bits_param_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_size = auto_out_a_bits_size_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_source = auto_out_a_bits_source_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_address = auto_out_a_bits_address_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_mask = auto_out_a_bits_mask_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_data = auto_out_a_bits_data_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_corrupt = auto_out_a_bits_corrupt_0; // @[Buffer.scala:40:9]
assign auto_out_d_ready = auto_out_d_ready_0; // @[Buffer.scala:40:9]
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
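// Illustrative sketch (not part of the original file): using ShiftRegInit to build a
// named, reset-initialized 3-deep pipeline of a Bool. The module and port names are
// hypothetical.
class ShiftRegInitExample extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  // Three RegNext stages, each reset to false.B, with suggested names pipe_0 .. pipe_2.
  io.q := ShiftRegInit(io.d, n = 3, init = false.B, name = Some("pipe"))
}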
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
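// Illustrative sketch (not part of the original file): wrapping a 4-bit signal in an
// AsyncResetShiftReg of depth 2, giving backend flows a dedicated module
// (AsyncResetShiftReg_w4_d2_i0) to constrain or replace. Module and port names are
// hypothetical.
class AsyncResetShiftRegExample extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(4.W))
    val q = Output(UInt(4.W))
  })
  io.q := AsyncResetShiftReg(io.d, 2, Some("cdc_pipe"))
}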
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
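// Editor's sketch (hypothetical usage, not part of the original file): synchronize a level
// signal arriving from another clock domain through a 3-deep, asynchronously reset flop chain.
class AsyncResetSynchronizerExample extends Module {
  val io = IO(new Bundle {
    val async_in = Input(Bool())
    val sync_out = Output(Bool())
  })
  io.sync_out := AsyncResetSynchronizerShiftReg(io.async_in, 3, Some("level_sync"))
}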
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
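// Editor's sketch (hypothetical usage, not part of the original file): capture a data word
// into the local clock domain with a single ClockCrossingReg, gated by an already
// synchronized enable pulse.
class ClockCrossingRegExample extends Module {
  val io = IO(new Bundle {
    val d  = Input(UInt(8.W))
    val en = Input(Bool())
    val q  = Output(UInt(8.W))
  })
  io.q := ClockCrossingReg(io.d, en = io.en, doInit = true, name = Some("cdc_capture"))
}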
| module AsyncResetSynchronizerShiftReg_w1_d3_i0_196( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_352 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_1), // @[SynchronizerReg.scala:87:41]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
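// Editor's sketch (hypothetical, not part of gemmini): instantiating the MacUnit above for
// 8-bit signed inputs accumulating into a 32-bit partial sum. The implicit Arithmetic[SInt]
// instance defined in Arithmetic.scala supplies the mac operation.
class MacUnitExample extends Module {
  val io = IO(new Bundle {
    val a = Input(SInt(8.W))
    val b = Input(SInt(8.W))
    val c = Input(SInt(32.W))
    val d = Output(SInt(32.W))
  })
  val mac = Module(new MacUnit(SInt(8.W), SInt(32.W), SInt(32.W)))
  mac.io.in_a := io.a
  mac.io.in_b := io.b
  mac.io.in_c := io.c
  io.d := mac.io.out_d // computes a * b + c
}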
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
* @param width Data width of operands
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
  def >>(u: UInt): T // This is a rounding shift! Rounds to nearest, with ties broken to even (cf. the RISC-V vector vxrm rounding modes)
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
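      // Editor's worked example (added for clarity): with self = 6.U ("b0110") and u = 2.U,
      // point_five = self(1) = 1, zeros = ((6 & 1) =/= 0) = 0, ones_digit = self(2) = 1,
      // so r = 1 and the result is (6 >> 2) + 1 = 2 (6/4 = 1.5 rounds to the even value 2).
      // With self = 2.U and u = 2.U, r = 0 and the result is 0 (0.5 also ties to the even value 0).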
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
        // Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
        // Instantiate the hardfloat sqrt
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
          // Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
          // Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module PE_463( // @[PE.scala:31:7]
input clock, // @[PE.scala:31:7]
input reset, // @[PE.scala:31:7]
input [7:0] io_in_a, // @[PE.scala:35:14]
input [19:0] io_in_b, // @[PE.scala:35:14]
input [19:0] io_in_d, // @[PE.scala:35:14]
output [7:0] io_out_a, // @[PE.scala:35:14]
output [19:0] io_out_b, // @[PE.scala:35:14]
output [19:0] io_out_c, // @[PE.scala:35:14]
input io_in_control_dataflow, // @[PE.scala:35:14]
input io_in_control_propagate, // @[PE.scala:35:14]
input [4:0] io_in_control_shift, // @[PE.scala:35:14]
output io_out_control_dataflow, // @[PE.scala:35:14]
output io_out_control_propagate, // @[PE.scala:35:14]
output [4:0] io_out_control_shift, // @[PE.scala:35:14]
input [2:0] io_in_id, // @[PE.scala:35:14]
output [2:0] io_out_id, // @[PE.scala:35:14]
input io_in_last, // @[PE.scala:35:14]
output io_out_last, // @[PE.scala:35:14]
input io_in_valid, // @[PE.scala:35:14]
output io_out_valid // @[PE.scala:35:14]
);
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:31:7]
wire [19:0] io_in_b_0 = io_in_b; // @[PE.scala:31:7]
wire [19:0] io_in_d_0 = io_in_d; // @[PE.scala:31:7]
wire io_in_control_dataflow_0 = io_in_control_dataflow; // @[PE.scala:31:7]
wire io_in_control_propagate_0 = io_in_control_propagate; // @[PE.scala:31:7]
wire [4:0] io_in_control_shift_0 = io_in_control_shift; // @[PE.scala:31:7]
wire [2:0] io_in_id_0 = io_in_id; // @[PE.scala:31:7]
wire io_in_last_0 = io_in_last; // @[PE.scala:31:7]
wire io_in_valid_0 = io_in_valid; // @[PE.scala:31:7]
wire io_bad_dataflow = 1'h0; // @[PE.scala:31:7]
wire _io_out_c_T_5 = 1'h0; // @[Arithmetic.scala:125:33]
wire _io_out_c_T_6 = 1'h0; // @[Arithmetic.scala:125:60]
wire _io_out_c_T_16 = 1'h0; // @[Arithmetic.scala:125:33]
wire _io_out_c_T_17 = 1'h0; // @[Arithmetic.scala:125:60]
wire [7:0] io_out_a_0 = io_in_a_0; // @[PE.scala:31:7]
wire [19:0] _mac_unit_io_in_b_T = io_in_b_0; // @[PE.scala:31:7, :106:37]
wire [19:0] _mac_unit_io_in_b_T_2 = io_in_b_0; // @[PE.scala:31:7, :113:37]
wire [19:0] _mac_unit_io_in_b_T_8 = io_in_b_0; // @[PE.scala:31:7, :137:35]
wire io_out_control_dataflow_0 = io_in_control_dataflow_0; // @[PE.scala:31:7]
wire io_out_control_propagate_0 = io_in_control_propagate_0; // @[PE.scala:31:7]
wire [4:0] io_out_control_shift_0 = io_in_control_shift_0; // @[PE.scala:31:7]
wire [2:0] io_out_id_0 = io_in_id_0; // @[PE.scala:31:7]
wire io_out_last_0 = io_in_last_0; // @[PE.scala:31:7]
wire io_out_valid_0 = io_in_valid_0; // @[PE.scala:31:7]
wire [19:0] io_out_b_0; // @[PE.scala:31:7]
wire [19:0] io_out_c_0; // @[PE.scala:31:7]
reg [7:0] c1; // @[PE.scala:70:15]
wire [7:0] _io_out_c_zeros_T_1 = c1; // @[PE.scala:70:15]
wire [7:0] _mac_unit_io_in_b_T_6 = c1; // @[PE.scala:70:15, :127:38]
reg [7:0] c2; // @[PE.scala:71:15]
wire [7:0] _io_out_c_zeros_T_10 = c2; // @[PE.scala:71:15]
wire [7:0] _mac_unit_io_in_b_T_4 = c2; // @[PE.scala:71:15, :121:38]
reg last_s; // @[PE.scala:89:25]
wire flip = last_s != io_in_control_propagate_0; // @[PE.scala:31:7, :89:25, :90:21]
wire [4:0] shift_offset = flip ? io_in_control_shift_0 : 5'h0; // @[PE.scala:31:7, :90:21, :91:25]
wire _GEN = shift_offset == 5'h0; // @[PE.scala:91:25]
wire _io_out_c_point_five_T; // @[Arithmetic.scala:101:32]
assign _io_out_c_point_five_T = _GEN; // @[Arithmetic.scala:101:32]
wire _io_out_c_point_five_T_5; // @[Arithmetic.scala:101:32]
assign _io_out_c_point_five_T_5 = _GEN; // @[Arithmetic.scala:101:32]
wire [5:0] _GEN_0 = {1'h0, shift_offset} - 6'h1; // @[PE.scala:91:25]
wire [5:0] _io_out_c_point_five_T_1; // @[Arithmetic.scala:101:53]
assign _io_out_c_point_five_T_1 = _GEN_0; // @[Arithmetic.scala:101:53]
wire [5:0] _io_out_c_zeros_T_2; // @[Arithmetic.scala:102:66]
assign _io_out_c_zeros_T_2 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66]
wire [5:0] _io_out_c_point_five_T_6; // @[Arithmetic.scala:101:53]
assign _io_out_c_point_five_T_6 = _GEN_0; // @[Arithmetic.scala:101:53]
wire [5:0] _io_out_c_zeros_T_11; // @[Arithmetic.scala:102:66]
assign _io_out_c_zeros_T_11 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66]
wire [4:0] _io_out_c_point_five_T_2 = _io_out_c_point_five_T_1[4:0]; // @[Arithmetic.scala:101:53]
wire [7:0] _io_out_c_point_five_T_3 = $signed($signed(c1) >>> _io_out_c_point_five_T_2); // @[PE.scala:70:15]
wire _io_out_c_point_five_T_4 = _io_out_c_point_five_T_3[0]; // @[Arithmetic.scala:101:50]
wire io_out_c_point_five = ~_io_out_c_point_five_T & _io_out_c_point_five_T_4; // @[Arithmetic.scala:101:{29,32,50}]
wire _GEN_1 = shift_offset < 5'h2; // @[PE.scala:91:25]
wire _io_out_c_zeros_T; // @[Arithmetic.scala:102:27]
assign _io_out_c_zeros_T = _GEN_1; // @[Arithmetic.scala:102:27]
wire _io_out_c_zeros_T_9; // @[Arithmetic.scala:102:27]
assign _io_out_c_zeros_T_9 = _GEN_1; // @[Arithmetic.scala:102:27]
wire [4:0] _io_out_c_zeros_T_3 = _io_out_c_zeros_T_2[4:0]; // @[Arithmetic.scala:102:66]
wire [31:0] _io_out_c_zeros_T_4 = 32'h1 << _io_out_c_zeros_T_3; // @[Arithmetic.scala:102:{60,66}]
wire [32:0] _io_out_c_zeros_T_5 = {1'h0, _io_out_c_zeros_T_4} - 33'h1; // @[Arithmetic.scala:102:{60,81}]
wire [31:0] _io_out_c_zeros_T_6 = _io_out_c_zeros_T_5[31:0]; // @[Arithmetic.scala:102:81]
wire [31:0] _io_out_c_zeros_T_7 = {24'h0, _io_out_c_zeros_T_6[7:0] & _io_out_c_zeros_T_1}; // @[Arithmetic.scala:102:{45,52,81}]
wire [31:0] _io_out_c_zeros_T_8 = _io_out_c_zeros_T ? 32'h0 : _io_out_c_zeros_T_7; // @[Arithmetic.scala:102:{24,27,52}]
wire io_out_c_zeros = |_io_out_c_zeros_T_8; // @[Arithmetic.scala:102:{24,89}]
wire [7:0] _GEN_2 = {3'h0, shift_offset}; // @[PE.scala:91:25]
wire [7:0] _GEN_3 = $signed($signed(c1) >>> _GEN_2); // @[PE.scala:70:15]
wire [7:0] _io_out_c_ones_digit_T; // @[Arithmetic.scala:103:30]
assign _io_out_c_ones_digit_T = _GEN_3; // @[Arithmetic.scala:103:30]
wire [7:0] _io_out_c_T; // @[Arithmetic.scala:107:15]
assign _io_out_c_T = _GEN_3; // @[Arithmetic.scala:103:30, :107:15]
wire io_out_c_ones_digit = _io_out_c_ones_digit_T[0]; // @[Arithmetic.scala:103:30]
wire _io_out_c_r_T = io_out_c_zeros | io_out_c_ones_digit; // @[Arithmetic.scala:102:89, :103:30, :105:38]
wire _io_out_c_r_T_1 = io_out_c_point_five & _io_out_c_r_T; // @[Arithmetic.scala:101:29, :105:{29,38}]
wire io_out_c_r = _io_out_c_r_T_1; // @[Arithmetic.scala:105:{29,53}]
wire [1:0] _io_out_c_T_1 = {1'h0, io_out_c_r}; // @[Arithmetic.scala:105:53, :107:33]
wire [8:0] _io_out_c_T_2 = {_io_out_c_T[7], _io_out_c_T} + {{7{_io_out_c_T_1[1]}}, _io_out_c_T_1}; // @[Arithmetic.scala:107:{15,28,33}]
wire [7:0] _io_out_c_T_3 = _io_out_c_T_2[7:0]; // @[Arithmetic.scala:107:28]
wire [7:0] _io_out_c_T_4 = _io_out_c_T_3; // @[Arithmetic.scala:107:28]
wire [19:0] _io_out_c_T_7 = {{12{_io_out_c_T_4[7]}}, _io_out_c_T_4}; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_8 = _io_out_c_T_7; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_9 = _io_out_c_T_8; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_10 = _io_out_c_T_9; // @[Arithmetic.scala:125:{81,99}]
wire [19:0] _mac_unit_io_in_b_T_1 = _mac_unit_io_in_b_T; // @[PE.scala:106:37]
wire [7:0] _mac_unit_io_in_b_WIRE = _mac_unit_io_in_b_T_1[7:0]; // @[PE.scala:106:37]
wire [7:0] _c1_T = io_in_d_0[7:0]; // @[PE.scala:31:7]
wire [7:0] _c2_T = io_in_d_0[7:0]; // @[PE.scala:31:7]
wire [7:0] _c1_T_1 = _c1_T; // @[Arithmetic.scala:114:{15,33}]
wire [4:0] _io_out_c_point_five_T_7 = _io_out_c_point_five_T_6[4:0]; // @[Arithmetic.scala:101:53]
wire [7:0] _io_out_c_point_five_T_8 = $signed($signed(c2) >>> _io_out_c_point_five_T_7); // @[PE.scala:71:15]
wire _io_out_c_point_five_T_9 = _io_out_c_point_five_T_8[0]; // @[Arithmetic.scala:101:50]
wire io_out_c_point_five_1 = ~_io_out_c_point_five_T_5 & _io_out_c_point_five_T_9; // @[Arithmetic.scala:101:{29,32,50}]
wire [4:0] _io_out_c_zeros_T_12 = _io_out_c_zeros_T_11[4:0]; // @[Arithmetic.scala:102:66]
wire [31:0] _io_out_c_zeros_T_13 = 32'h1 << _io_out_c_zeros_T_12; // @[Arithmetic.scala:102:{60,66}]
wire [32:0] _io_out_c_zeros_T_14 = {1'h0, _io_out_c_zeros_T_13} - 33'h1; // @[Arithmetic.scala:102:{60,81}]
wire [31:0] _io_out_c_zeros_T_15 = _io_out_c_zeros_T_14[31:0]; // @[Arithmetic.scala:102:81]
wire [31:0] _io_out_c_zeros_T_16 = {24'h0, _io_out_c_zeros_T_15[7:0] & _io_out_c_zeros_T_10}; // @[Arithmetic.scala:102:{45,52,81}]
wire [31:0] _io_out_c_zeros_T_17 = _io_out_c_zeros_T_9 ? 32'h0 : _io_out_c_zeros_T_16; // @[Arithmetic.scala:102:{24,27,52}]
wire io_out_c_zeros_1 = |_io_out_c_zeros_T_17; // @[Arithmetic.scala:102:{24,89}]
wire [7:0] _GEN_4 = $signed($signed(c2) >>> _GEN_2); // @[PE.scala:71:15]
wire [7:0] _io_out_c_ones_digit_T_1; // @[Arithmetic.scala:103:30]
assign _io_out_c_ones_digit_T_1 = _GEN_4; // @[Arithmetic.scala:103:30]
wire [7:0] _io_out_c_T_11; // @[Arithmetic.scala:107:15]
assign _io_out_c_T_11 = _GEN_4; // @[Arithmetic.scala:103:30, :107:15]
wire io_out_c_ones_digit_1 = _io_out_c_ones_digit_T_1[0]; // @[Arithmetic.scala:103:30]
wire _io_out_c_r_T_2 = io_out_c_zeros_1 | io_out_c_ones_digit_1; // @[Arithmetic.scala:102:89, :103:30, :105:38]
wire _io_out_c_r_T_3 = io_out_c_point_five_1 & _io_out_c_r_T_2; // @[Arithmetic.scala:101:29, :105:{29,38}]
wire io_out_c_r_1 = _io_out_c_r_T_3; // @[Arithmetic.scala:105:{29,53}]
wire [1:0] _io_out_c_T_12 = {1'h0, io_out_c_r_1}; // @[Arithmetic.scala:105:53, :107:33]
wire [8:0] _io_out_c_T_13 = {_io_out_c_T_11[7], _io_out_c_T_11} + {{7{_io_out_c_T_12[1]}}, _io_out_c_T_12}; // @[Arithmetic.scala:107:{15,28,33}]
wire [7:0] _io_out_c_T_14 = _io_out_c_T_13[7:0]; // @[Arithmetic.scala:107:28]
wire [7:0] _io_out_c_T_15 = _io_out_c_T_14; // @[Arithmetic.scala:107:28]
wire [19:0] _io_out_c_T_18 = {{12{_io_out_c_T_15[7]}}, _io_out_c_T_15}; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_19 = _io_out_c_T_18; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_20 = _io_out_c_T_19; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_21 = _io_out_c_T_20; // @[Arithmetic.scala:125:{81,99}]
wire [19:0] _mac_unit_io_in_b_T_3 = _mac_unit_io_in_b_T_2; // @[PE.scala:113:37]
wire [7:0] _mac_unit_io_in_b_WIRE_1 = _mac_unit_io_in_b_T_3[7:0]; // @[PE.scala:113:37]
wire [7:0] _c2_T_1 = _c2_T; // @[Arithmetic.scala:114:{15,33}]
wire [7:0] _mac_unit_io_in_b_T_5; // @[PE.scala:121:38]
assign _mac_unit_io_in_b_T_5 = _mac_unit_io_in_b_T_4; // @[PE.scala:121:38]
wire [7:0] _mac_unit_io_in_b_WIRE_2 = _mac_unit_io_in_b_T_5; // @[PE.scala:121:38]
assign io_out_c_0 = io_in_control_propagate_0 ? {{12{c1[7]}}, c1} : {{12{c2[7]}}, c2}; // @[PE.scala:31:7, :70:15, :71:15, :119:30, :120:16, :126:16]
wire [7:0] _mac_unit_io_in_b_T_7; // @[PE.scala:127:38]
assign _mac_unit_io_in_b_T_7 = _mac_unit_io_in_b_T_6; // @[PE.scala:127:38]
wire [7:0] _mac_unit_io_in_b_WIRE_3 = _mac_unit_io_in_b_T_7; // @[PE.scala:127:38]
wire [19:0] _mac_unit_io_in_b_T_9 = _mac_unit_io_in_b_T_8; // @[PE.scala:137:35]
wire [7:0] _mac_unit_io_in_b_WIRE_4 = _mac_unit_io_in_b_T_9[7:0]; // @[PE.scala:137:35]
always @(posedge clock) begin // @[PE.scala:31:7]
if (io_in_valid_0 & io_in_control_propagate_0) // @[PE.scala:31:7, :102:95, :141:17, :142:8]
c1 <= io_in_d_0[7:0]; // @[PE.scala:31:7, :70:15]
if (~(~io_in_valid_0 | io_in_control_propagate_0)) // @[PE.scala:31:7, :71:15, :102:95, :119:30, :130:10, :141:{9,17}, :143:8]
c2 <= io_in_d_0[7:0]; // @[PE.scala:31:7, :71:15]
if (io_in_valid_0) // @[PE.scala:31:7]
last_s <= io_in_control_propagate_0; // @[PE.scala:31:7, :89:25]
  end // @[PE.scala:31:7]
MacUnit_207 mac_unit ( // @[PE.scala:64:24]
.clock (clock),
.reset (reset),
.io_in_a (io_in_a_0), // @[PE.scala:31:7]
.io_in_b (io_in_control_propagate_0 ? _mac_unit_io_in_b_WIRE_2 : _mac_unit_io_in_b_WIRE_3), // @[PE.scala:31:7, :119:30, :121:{24,38}, :127:{24,38}]
.io_in_c (io_in_b_0), // @[PE.scala:31:7]
.io_out_d (io_out_b_0)
); // @[PE.scala:64:24]
assign io_out_a = io_out_a_0; // @[PE.scala:31:7]
assign io_out_b = io_out_b_0; // @[PE.scala:31:7]
assign io_out_c = io_out_c_0; // @[PE.scala:31:7]
assign io_out_control_dataflow = io_out_control_dataflow_0; // @[PE.scala:31:7]
assign io_out_control_propagate = io_out_control_propagate_0; // @[PE.scala:31:7]
assign io_out_control_shift = io_out_control_shift_0; // @[PE.scala:31:7]
assign io_out_id = io_out_id_0; // @[PE.scala:31:7]
assign io_out_last = io_out_last_0; // @[PE.scala:31:7]
assign io_out_valid = io_out_valid_0; // @[PE.scala:31:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Metadata.scala:
// See LICENSE.SiFive for license details.
// See LICENSE.Berkeley for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.constants.MemoryOpConstants
import freechips.rocketchip.util._
object ClientStates {
val width = 2
def Nothing = 0.U(width.W)
def Branch = 1.U(width.W)
def Trunk = 2.U(width.W)
def Dirty = 3.U(width.W)
def hasReadPermission(state: UInt): Bool = state > Nothing
def hasWritePermission(state: UInt): Bool = state > Branch
}
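// Editor's note (added example): the encoding orders the states as
// Nothing (0) < Branch (1) < Trunk (2) < Dirty (3), so for instance
// hasReadPermission(Branch) = (1 > 0) = true while hasWritePermission(Branch) = (1 > 1) = false.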
object MemoryOpCategories extends MemoryOpConstants {
def wr = Cat(true.B, true.B) // Op actually writes
def wi = Cat(false.B, true.B) // Future op will write
def rd = Cat(false.B, false.B) // Op only reads
def categorize(cmd: UInt): UInt = {
val cat = Cat(isWrite(cmd), isWriteIntent(cmd))
//assert(cat.isOneOf(wr,wi,rd), "Could not categorize command.")
cat
}
}
/** Stores the client-side coherence information,
* such as permissions on the data and whether the data is dirty.
* Its API can be used to make TileLink messages in response to
  * memory operations, cache control operations, or Probe messages.
*/
class ClientMetadata extends Bundle {
/** Actual state information stored in this bundle */
val state = UInt(ClientStates.width.W)
/** Metadata equality */
def ===(rhs: UInt): Bool = state === rhs
def ===(rhs: ClientMetadata): Bool = state === rhs.state
def =/=(rhs: ClientMetadata): Bool = !this.===(rhs)
/** Is the block's data present in this cache */
def isValid(dummy: Int = 0): Bool = state > ClientStates.Nothing
/** Determine whether this cmd misses, and the new state (on hit) or param to be sent (on miss) */
private def growStarter(cmd: UInt): (Bool, UInt) = {
import MemoryOpCategories._
import TLPermissions._
import ClientStates._
val c = categorize(cmd)
MuxTLookup(Cat(c, state), (false.B, 0.U), Seq(
//(effect, am now) -> (was a hit, next)
Cat(rd, Dirty) -> (true.B, Dirty),
Cat(rd, Trunk) -> (true.B, Trunk),
Cat(rd, Branch) -> (true.B, Branch),
Cat(wi, Dirty) -> (true.B, Dirty),
Cat(wi, Trunk) -> (true.B, Trunk),
Cat(wr, Dirty) -> (true.B, Dirty),
Cat(wr, Trunk) -> (true.B, Dirty),
//(effect, am now) -> (was a miss, param)
Cat(rd, Nothing) -> (false.B, NtoB),
Cat(wi, Branch) -> (false.B, BtoT),
Cat(wi, Nothing) -> (false.B, NtoT),
Cat(wr, Branch) -> (false.B, BtoT),
Cat(wr, Nothing) -> (false.B, NtoT)))
}
/** Determine what state to go to after miss based on Grant param
* For now, doesn't depend on state (which may have been Probed).
*/
private def growFinisher(cmd: UInt, param: UInt): UInt = {
import MemoryOpCategories._
import TLPermissions._
import ClientStates._
val c = categorize(cmd)
//assert(c === rd || param === toT, "Client was expecting trunk permissions.")
MuxLookup(Cat(c, param), Nothing)(Seq(
//(effect param) -> (next)
Cat(rd, toB) -> Branch,
Cat(rd, toT) -> Trunk,
Cat(wi, toT) -> Trunk,
Cat(wr, toT) -> Dirty))
}
/** Does this cache have permissions on this block sufficient to perform op,
* and what to do next (Acquire message param or updated metadata). */
def onAccess(cmd: UInt): (Bool, UInt, ClientMetadata) = {
val r = growStarter(cmd)
(r._1, r._2, ClientMetadata(r._2))
}
/** Does a secondary miss on the block require another Acquire message */
def onSecondaryAccess(first_cmd: UInt, second_cmd: UInt): (Bool, Bool, UInt, ClientMetadata, UInt) = {
import MemoryOpCategories._
val r1 = growStarter(first_cmd)
val r2 = growStarter(second_cmd)
val needs_second_acq = isWriteIntent(second_cmd) && !isWriteIntent(first_cmd)
val hit_again = r1._1 && r2._1
val dirties = categorize(second_cmd) === wr
val biggest_grow_param = Mux(dirties, r2._2, r1._2)
val dirtiest_state = ClientMetadata(biggest_grow_param)
val dirtiest_cmd = Mux(dirties, second_cmd, first_cmd)
(needs_second_acq, hit_again, biggest_grow_param, dirtiest_state, dirtiest_cmd)
}
/** Metadata change on a returned Grant */
def onGrant(cmd: UInt, param: UInt): ClientMetadata = ClientMetadata(growFinisher(cmd, param))
/** Determine what state to go to based on Probe param */
private def shrinkHelper(param: UInt): (Bool, UInt, UInt) = {
import ClientStates._
import TLPermissions._
MuxTLookup(Cat(param, state), (false.B, 0.U, 0.U), Seq(
//(wanted, am now) -> (hasDirtyData resp, next)
Cat(toT, Dirty) -> (true.B, TtoT, Trunk),
Cat(toT, Trunk) -> (false.B, TtoT, Trunk),
Cat(toT, Branch) -> (false.B, BtoB, Branch),
Cat(toT, Nothing) -> (false.B, NtoN, Nothing),
Cat(toB, Dirty) -> (true.B, TtoB, Branch),
Cat(toB, Trunk) -> (false.B, TtoB, Branch), // Policy: Don't notify on clean downgrade
Cat(toB, Branch) -> (false.B, BtoB, Branch),
Cat(toB, Nothing) -> (false.B, NtoN, Nothing),
Cat(toN, Dirty) -> (true.B, TtoN, Nothing),
Cat(toN, Trunk) -> (false.B, TtoN, Nothing), // Policy: Don't notify on clean downgrade
Cat(toN, Branch) -> (false.B, BtoN, Nothing), // Policy: Don't notify on clean downgrade
Cat(toN, Nothing) -> (false.B, NtoN, Nothing)))
}
/** Translate cache control cmds into Probe param */
private def cmdToPermCap(cmd: UInt): UInt = {
import MemoryOpCategories._
import TLPermissions._
MuxLookup(cmd, toN)(Seq(
M_FLUSH -> toN,
M_PRODUCE -> toB,
M_CLEAN -> toT))
}
def onCacheControl(cmd: UInt): (Bool, UInt, ClientMetadata) = {
val r = shrinkHelper(cmdToPermCap(cmd))
(r._1, r._2, ClientMetadata(r._3))
}
def onProbe(param: UInt): (Bool, UInt, ClientMetadata) = {
val r = shrinkHelper(param)
(r._1, r._2, ClientMetadata(r._3))
}
}
/** Factories for ClientMetadata, including on reset */
object ClientMetadata {
def apply(perm: UInt) = {
val meta = Wire(new ClientMetadata)
meta.state := perm
meta
}
def onReset = ClientMetadata(ClientStates.Nothing)
def maximum = ClientMetadata(ClientStates.Dirty)
}
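// Editor's sketch (hypothetical example, not part of the original file): exercising the
// ClientMetadata API above. Starting from Branch permissions, onAccess reports whether a
// memory command hits and, on a miss, which grow-permission parameter the Acquire should
// carry (e.g. a store from Branch misses and requests BtoT).
class ClientMetadataExample extends Module {
  val io = IO(new Bundle {
    val cmd           = Input(UInt(MemoryOpCategories.M_SZ.W))
    val hit           = Output(Bool())
    val acquire_param = Output(UInt(2.W)) // NtoB/NtoT/BtoT encodings fit in two bits
  })
  val meta = ClientMetadata(ClientStates.Branch)
  val (hit, param, _) = meta.onAccess(io.cmd)
  io.hit := hit
  io.acquire_param := param
}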
File HellaCache.scala:
// See LICENSE.SiFive for license details.
// See LICENSE.Berkeley for license details.
package freechips.rocketchip.rocket
import chisel3.{dontTouch, _}
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.bundlebridge._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.amba.AMBAProtField
import freechips.rocketchip.diplomacy.{IdRange, TransferSizes, RegionType}
import freechips.rocketchip.tile.{L1CacheParams, HasL1CacheParameters, HasCoreParameters, CoreBundle, HasNonDiplomaticTileParameters, BaseTile, HasTileParameters}
import freechips.rocketchip.tilelink.{TLMasterParameters, TLClientNode, TLMasterPortParameters, TLEdgeOut, TLWidthWidget, TLFIFOFixer, ClientMetadata}
import freechips.rocketchip.util.{Code, RandomReplacement, ParameterizedBundle}
import freechips.rocketchip.util.{BooleanToAugmentedBoolean, IntToAugmentedInt}
import scala.collection.mutable.ListBuffer
case class DCacheParams(
nSets: Int = 64,
nWays: Int = 4,
rowBits: Int = 64,
subWordBits: Option[Int] = None,
replacementPolicy: String = "random",
nTLBSets: Int = 1,
nTLBWays: Int = 32,
nTLBBasePageSectors: Int = 4,
nTLBSuperpages: Int = 4,
tagECC: Option[String] = None,
dataECC: Option[String] = None,
dataECCBytes: Int = 1,
nMSHRs: Int = 1,
nSDQ: Int = 17,
nRPQ: Int = 16,
nMMIOs: Int = 1,
blockBytes: Int = 64,
separateUncachedResp: Boolean = false,
acquireBeforeRelease: Boolean = false,
pipelineWayMux: Boolean = false,
clockGate: Boolean = false,
scratch: Option[BigInt] = None) extends L1CacheParams {
def tagCode: Code = Code.fromString(tagECC)
def dataCode: Code = Code.fromString(dataECC)
def dataScratchpadBytes: Int = scratch.map(_ => nSets*blockBytes).getOrElse(0)
def replacement = new RandomReplacement(nWays)
def silentDrop: Boolean = !acquireBeforeRelease
require((!scratch.isDefined || nWays == 1),
"Scratchpad only allowed in direct-mapped cache.")
require((!scratch.isDefined || nMSHRs == 0),
"Scratchpad only allowed in blocking cache.")
if (scratch.isEmpty)
require(isPow2(nSets), s"nSets($nSets) must be pow2")
}
trait HasL1HellaCacheParameters extends HasL1CacheParameters with HasCoreParameters {
val cacheParams = tileParams.dcache.get
val cfg = cacheParams
def wordBits = coreDataBits
def wordBytes = coreDataBytes
def subWordBits = cacheParams.subWordBits.getOrElse(wordBits)
def subWordBytes = subWordBits / 8
def wordOffBits = log2Up(wordBytes)
def beatBytes = cacheBlockBytes / cacheDataBeats
def beatWords = beatBytes / wordBytes
def beatOffBits = log2Up(beatBytes)
def idxMSB = untagBits-1
def idxLSB = blockOffBits
def offsetmsb = idxLSB-1
def offsetlsb = wordOffBits
def rowWords = rowBits/wordBits
def doNarrowRead = coreDataBits * nWays % rowBits == 0
def eccBytes = cacheParams.dataECCBytes
val eccBits = cacheParams.dataECCBytes * 8
val encBits = cacheParams.dataCode.width(eccBits)
val encWordBits = encBits * (wordBits / eccBits)
def encDataBits = cacheParams.dataCode.width(coreDataBits) // NBDCache only
def encRowBits = encDataBits*rowWords
def lrscCycles = coreParams.lrscCycles // ISA requires 16-insn LRSC sequences to succeed
def lrscBackoff = 3 // disallow LRSC reacquisition briefly
def blockProbeAfterGrantCycles = 8 // give the processor some time to issue a request after a grant
def nIOMSHRs = cacheParams.nMMIOs
def maxUncachedInFlight = cacheParams.nMMIOs
def dataScratchpadSize = cacheParams.dataScratchpadBytes
require(rowBits >= coreDataBits, s"rowBits($rowBits) < coreDataBits($coreDataBits)")
if (!usingDataScratchpad)
require(rowBits == cacheDataBits, s"rowBits($rowBits) != cacheDataBits($cacheDataBits)")
// would need offset addr for puts if data width < xlen
require(xLen <= cacheDataBits, s"xLen($xLen) > cacheDataBits($cacheDataBits)")
}
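// Editor's worked example (added for clarity; assumes the default DCacheParams above and a
// 64-bit core, i.e. coreDataBits = 64): with nSets = 64, nWays = 4, rowBits = 64 and
// blockBytes = 64, the derived values are wordBytes = 8, wordOffBits = 3,
// idxLSB = blockOffBits = log2(64) = 6, rowWords = 64/64 = 1, and
// doNarrowRead = (64 * 4 % 64 == 0) = true, so data reads can be narrowed to a single word per way.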
abstract class L1HellaCacheModule(implicit val p: Parameters) extends Module
with HasL1HellaCacheParameters
abstract class L1HellaCacheBundle(implicit val p: Parameters) extends ParameterizedBundle()(p)
with HasL1HellaCacheParameters
/** Bundle definitions for HellaCache interfaces */
trait HasCoreMemOp extends HasL1HellaCacheParameters {
val addr = UInt(coreMaxAddrBits.W)
val idx = (usingVM && untagBits > pgIdxBits).option(UInt(coreMaxAddrBits.W))
val tag = UInt((coreParams.dcacheReqTagBits + log2Ceil(dcacheArbPorts)).W)
val cmd = UInt(M_SZ.W)
val size = UInt(log2Ceil(coreDataBytes.log2 + 1).W)
val signed = Bool()
val dprv = UInt(PRV.SZ.W)
val dv = Bool()
}
trait HasCoreData extends HasCoreParameters {
val data = UInt(coreDataBits.W)
val mask = UInt(coreDataBytes.W)
}
class HellaCacheReqInternal(implicit p: Parameters) extends CoreBundle()(p) with HasCoreMemOp {
val phys = Bool()
val no_resp = Bool() // The dcache may omit generating a response for this request
val no_alloc = Bool()
val no_xcpt = Bool()
}
class HellaCacheReq(implicit p: Parameters) extends HellaCacheReqInternal()(p) with HasCoreData
class HellaCacheResp(implicit p: Parameters) extends CoreBundle()(p)
with HasCoreMemOp
with HasCoreData {
val replay = Bool()
val has_data = Bool()
val data_word_bypass = UInt(coreDataBits.W)
val data_raw = UInt(coreDataBits.W)
val store_data = UInt(coreDataBits.W)
}
class AlignmentExceptions extends Bundle {
val ld = Bool()
val st = Bool()
}
class HellaCacheExceptions extends Bundle {
val ma = new AlignmentExceptions
val pf = new AlignmentExceptions
val gf = new AlignmentExceptions
val ae = new AlignmentExceptions
}
class HellaCacheWriteData(implicit p: Parameters) extends CoreBundle()(p) with HasCoreData
class HellaCachePerfEvents extends Bundle {
val acquire = Bool()
val release = Bool()
val grant = Bool()
val tlbMiss = Bool()
val blocked = Bool()
val canAcceptStoreThenLoad = Bool()
val canAcceptStoreThenRMW = Bool()
val canAcceptLoadThenLoad = Bool()
val storeBufferEmptyAfterLoad = Bool()
val storeBufferEmptyAfterStore = Bool()
}
// interface between D$ and processor/DTLB
class HellaCacheIO(implicit p: Parameters) extends CoreBundle()(p) {
val req = Decoupled(new HellaCacheReq)
val s1_kill = Output(Bool()) // kill previous cycle's req
val s1_data = Output(new HellaCacheWriteData()) // data for previous cycle's req
val s2_nack = Input(Bool()) // req from two cycles ago is rejected
val s2_nack_cause_raw = Input(Bool()) // reason for nack is store-load RAW hazard (performance hint)
val s2_kill = Output(Bool()) // kill req from two cycles ago
val s2_uncached = Input(Bool()) // advisory signal that the access is MMIO
val s2_paddr = Input(UInt(paddrBits.W)) // translated address
val resp = Flipped(Valid(new HellaCacheResp))
val replay_next = Input(Bool())
val s2_xcpt = Input(new HellaCacheExceptions)
val s2_gpa = Input(UInt(vaddrBitsExtended.W))
val s2_gpa_is_pte = Input(Bool())
val uncached_resp = tileParams.dcache.get.separateUncachedResp.option(Flipped(Decoupled(new HellaCacheResp)))
val ordered = Input(Bool())
val store_pending = Input(Bool()) // there is a store in a store buffer somewhere
val perf = Input(new HellaCachePerfEvents())
val keep_clock_enabled = Output(Bool()) // should D$ avoid clock-gating itself?
val clock_enabled = Input(Bool()) // is D$ currently being clocked?
}
/** Base classes for Diplomatic TL2 HellaCaches */
abstract class HellaCache(tileId: Int)(implicit p: Parameters) extends LazyModule
with HasNonDiplomaticTileParameters {
protected val cfg = tileParams.dcache.get
protected def cacheClientParameters = cfg.scratch.map(x => Seq()).getOrElse(Seq(TLMasterParameters.v1(
name = s"Core ${tileId} DCache",
sourceId = IdRange(0, 1 max cfg.nMSHRs),
supportsProbe = TransferSizes(cfg.blockBytes, cfg.blockBytes))))
protected def mmioClientParameters = Seq(TLMasterParameters.v1(
name = s"Core ${tileId} DCache MMIO",
sourceId = IdRange(firstMMIO, firstMMIO + cfg.nMMIOs),
requestFifo = true))
def firstMMIO = (cacheClientParameters.map(_.sourceId.end) :+ 0).max
val node = TLClientNode(Seq(TLMasterPortParameters.v1(
clients = cacheClientParameters ++ mmioClientParameters,
minLatency = 1,
requestFields = tileParams.core.useVM.option(Seq()).getOrElse(Seq(AMBAProtField())))))
val hartIdSinkNodeOpt = cfg.scratch.map(_ => BundleBridgeSink[UInt]())
val mmioAddressPrefixSinkNodeOpt = cfg.scratch.map(_ => BundleBridgeSink[UInt]())
val module: HellaCacheModule
def flushOnFenceI = cfg.scratch.isEmpty && !node.edges.out(0).manager.managers.forall(m => !m.supportsAcquireB || !m.executable || m.regionType >= RegionType.TRACKED || m.regionType <= RegionType.IDEMPOTENT)
def canSupportCFlushLine = !usingVM || cfg.blockBytes * cfg.nSets <= (1 << pgIdxBits)
require(!tileParams.core.haveCFlush || cfg.scratch.isEmpty, "CFLUSH_D_L1 instruction requires a D$")
}
class HellaCacheBundle(implicit p: Parameters) extends CoreBundle()(p) {
val cpu = Flipped(new HellaCacheIO)
val ptw = new TLBPTWIO()
val errors = new DCacheErrors
val tlb_port = new DCacheTLBPort
}
class HellaCacheModule(outer: HellaCache) extends LazyModuleImp(outer)
with HasL1HellaCacheParameters {
implicit val edge: TLEdgeOut = outer.node.edges.out(0)
val (tl_out, _) = outer.node.out(0)
val io = IO(new HellaCacheBundle)
val io_hartid = outer.hartIdSinkNodeOpt.map(_.bundle)
val io_mmio_address_prefix = outer.mmioAddressPrefixSinkNodeOpt.map(_.bundle)
dontTouch(io.cpu.resp) // Users like to monitor these fields even if the core ignores some signals
dontTouch(io.cpu.s1_data)
require(rowBits == edge.bundle.dataBits)
private val fifoManagers = edge.manager.managers.filter(TLFIFOFixer.allVolatile)
fifoManagers.foreach { m =>
require (m.fifoId == fifoManagers.head.fifoId,
s"IOMSHRs must be FIFO for all regions with effects, but HellaCache sees\n"+
s"${m.nodePath.map(_.name)}\nversus\n${fifoManagers.head.nodePath.map(_.name)}")
}
}
/** Support overriding which HellaCache is instantiated */
case object BuildHellaCache extends Field[BaseTile => Parameters => HellaCache](HellaCacheFactory.apply)
object HellaCacheFactory {
def apply(tile: BaseTile)(p: Parameters): HellaCache = {
if (tile.tileParams.dcache.get.nMSHRs == 0)
new DCache(tile.tileId, tile.crossing)(p)
else
new NonBlockingDCache(tile.tileId)(p)
}
}
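The BuildHellaCache field above is the hook for swapping in a different data cache. A hedged sketch of a Config fragment that overrides it (the fragment name WithBlockingDCacheOnly is hypothetical; DCache and BaseTile are the rocket-chip types already referenced above):
class WithBlockingDCacheOnly extends Config((site, here, up) => {
  case BuildHellaCache => (tile: BaseTile) => (p: Parameters) =>
    new DCache(tile.tileId, tile.crossing)(p)   // always pick the blocking DCache, ignoring nMSHRs
})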
/** Mix-ins for constructing tiles that have a HellaCache */
trait HasHellaCache { this: BaseTile =>
val module: HasHellaCacheModule
implicit val p: Parameters
var nDCachePorts = 0
lazy val dcache: HellaCache = LazyModule(p(BuildHellaCache)(this)(p))
tlMasterXbar.node := TLWidthWidget(tileParams.dcache.get.rowBits/8) := dcache.node
dcache.hartIdSinkNodeOpt.map { _ := hartIdNexusNode }
dcache.mmioAddressPrefixSinkNodeOpt.map { _ := mmioAddressPrefixNexusNode }
InModuleBody {
dcache.module.io.tlb_port := DontCare
}
}
trait HasHellaCacheModule {
val outer: HasHellaCache with HasTileParameters
implicit val p: Parameters
val dcachePorts = ListBuffer[HellaCacheIO]()
val dcacheArb = Module(new HellaCacheArbiter(outer.nDCachePorts)(outer.p))
outer.dcache.module.io.cpu <> dcacheArb.io.mem
}
/** Metadata array used for all HellaCaches */
class L1Metadata(implicit p: Parameters) extends L1HellaCacheBundle()(p) {
val coh = new ClientMetadata
val tag = UInt(tagBits.W)
}
object L1Metadata {
def apply(tag: Bits, coh: ClientMetadata)(implicit p: Parameters) = {
val meta = Wire(new L1Metadata)
meta.tag := tag
meta.coh := coh
meta
}
}
class L1MetaReadReq(implicit p: Parameters) extends L1HellaCacheBundle()(p) {
val idx = UInt(idxBits.W)
val way_en = UInt(nWays.W)
val tag = UInt(tagBits.W)
}
class L1MetaWriteReq(implicit p: Parameters) extends L1MetaReadReq()(p) {
val data = new L1Metadata
}
class L1MetadataArray[T <: L1Metadata](onReset: () => T)(implicit p: Parameters) extends L1HellaCacheModule()(p) {
val rstVal = onReset()
val io = IO(new Bundle {
val read = Flipped(Decoupled(new L1MetaReadReq))
val write = Flipped(Decoupled(new L1MetaWriteReq))
val resp = Output(Vec(nWays, rstVal.cloneType))
})
val rst_cnt = RegInit(0.U(log2Up(nSets+1).W))
val rst = rst_cnt < nSets.U
val waddr = Mux(rst, rst_cnt, io.write.bits.idx)
val wdata = Mux(rst, rstVal, io.write.bits.data).asUInt
val wmask = Mux(rst || (nWays == 1).B, (-1).S, io.write.bits.way_en.asSInt).asBools
val rmask = Mux(rst || (nWays == 1).B, (-1).S, io.read.bits.way_en.asSInt).asBools
when (rst) { rst_cnt := rst_cnt+1.U }
val metabits = rstVal.getWidth
val tag_array = SyncReadMem(nSets, Vec(nWays, UInt(metabits.W)))
val wen = rst || io.write.valid
when (wen) {
tag_array.write(waddr, VecInit.fill(nWays)(wdata), wmask)
}
io.resp := tag_array.read(io.read.bits.idx, io.read.fire).map(_.asTypeOf(chiselTypeOf(rstVal)))
io.read.ready := !wen // so really this could be a 6T RAM
io.write.ready := !rst
}
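As a point of reference, a minimal sketch (not part of HellaCache.scala; s0_valid and s0_idx are hypothetical request signals) of how this array is typically instantiated: the caller supplies an onReset generator that builds an all-invalid entry, which the rst_cnt sweep above then writes into every set after reset.
def onReset = L1Metadata(0.U, ClientMetadata.onReset)
val meta = Module(new L1MetadataArray(() => onReset))
meta.io.read.valid    := s0_valid
meta.io.read.bits     := DontCare
meta.io.read.bits.idx := s0_idx
meta.io.write.valid   := false.B       // reads only in this sketch
meta.io.write.bits    := DontCare
val s1_meta = meta.io.resp             // per-way metadata, available the cycle after the read fires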
File TagArray.scala:
package shuttle.dmem
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
import freechips.rocketchip.rocket._
import freechips.rocketchip.tile.{TileKey}
class L1MetadataArrayBank[T <: L1Metadata](onReset: () => T, nSets: Int)(implicit p: Parameters) extends L1HellaCacheModule()(p) {
val rstVal = onReset()
val io = IO(new Bundle {
val read = Flipped(Decoupled(new L1MetaReadReq))
val write = Flipped(Decoupled(new L1MetaWriteReq))
val resp = Vec(nWays, Output(rstVal.cloneType))
})
val rst_cnt = RegInit(0.U(log2Up(nSets+1).W))
val rst = rst_cnt < nSets.U
val waddr = Mux(rst, rst_cnt, io.write.bits.idx)
val wdata = Mux(rst, rstVal, io.write.bits.data).asUInt
val wmask = Mux(rst || (nWays == 1).B, ~(0.U(nWays.W)), io.write.bits.way_en).asBools
val rmask = Mux(rst || (nWays == 1).B, ~(0.U(nWays.W)), io.read.bits.way_en).asBools
when (rst) { rst_cnt := rst_cnt + 1.U }
val metabits = rstVal.getWidth
val tag_array = SyncReadMem(nSets, Vec(nWays, UInt(metabits.W)))
val wen = Wire(Bool())
val ren = Wire(Bool())
val stall_ctr = RegInit(0.U(2.W))
val stall_read = stall_ctr === ~(0.U(2.W))
val force_stall = WireInit(false.B)
val rbuf_valid = RegInit(false.B)
val rbuf_idx = Reg(UInt(log2Ceil(nSets).W))
val rbuf = Reg(io.resp.cloneType)
val forward_from_rbuf = WireInit(false.B)
when (io.read.valid && force_stall) {
stall_ctr := stall_ctr + 1.U
} .otherwise {
stall_ctr := 0.U
}
when (rst) {
wen := true.B
ren := false.B
io.read.ready := false.B
io.write.ready := false.B
} .otherwise {
when (io.read.valid && !stall_read) {
when (io.read.bits.idx === rbuf_idx && rbuf_valid) {
forward_from_rbuf := true.B
ren := false.B
wen := io.write.valid
io.read.ready := true.B
io.write.ready := true.B
} .otherwise {
ren := true.B
wen := false.B
io.read.ready := true.B
io.write.ready := false.B
force_stall := io.write.valid
}
} .otherwise {
ren := false.B
wen := io.write.valid
io.read.ready := false.B
io.write.ready := true.B
}
}
val s1_read_idx = RegEnable(io.read.bits.idx, io.read.valid)
when (RegNext(io.read.fire && !forward_from_rbuf) && !(io.write.valid && io.write.bits.idx === s1_read_idx)) {
rbuf_valid := true.B
rbuf_idx := s1_read_idx
rbuf := io.resp
}
when (io.write.valid && io.write.bits.idx === rbuf_idx) {
rbuf_valid := false.B
}
when (wen) {
tag_array.write(waddr, VecInit(Seq.fill(nWays) { wdata }), wmask)
}
io.resp := tag_array.read(io.read.bits.idx, ren && !wen).map(_.asTypeOf(rstVal))
when (RegNext(forward_from_rbuf)) {
io.resp := RegEnable(rbuf, forward_from_rbuf)
}
}
class L1MetadataArrayBanked[T <: L1Metadata](onReset: () => T, nBanks: Int, nReadPorts: Int, nWritePorts: Int)(implicit p: Parameters) extends L1HellaCacheModule()(p) {
val rstVal = onReset()
val io = IO(new Bundle {
val read = Flipped(Vec(nReadPorts, Decoupled(new L1MetaReadReq)))
val write = Flipped(Vec(nWritePorts, Decoupled(new L1MetaWriteReq)))
val resp = Vec(nReadPorts, Vec(nWays, Output(rstVal.cloneType)))
})
// No banking, duplicate the arrays
if (nBanks == 0) {
val arrays = Seq.fill(nReadPorts) { Module(new L1MetadataArrayBank(onReset, nSets)) }
(io.read zip arrays).map { case (r,a) => a.io.read <> r }
(io.resp zip arrays).map { case (r,a) => r <> a.io.resp }
val write_arb = Module(new Arbiter(new L1MetaWriteReq, nWritePorts))
write_arb.io.in <> io.write
write_arb.io.out.ready := arrays.map(_.io.write.ready).reduce(_&&_)
arrays.foreach(_.io.write.bits := write_arb.io.out.bits)
arrays.zipWithIndex.foreach { case (a,i) =>
a.io.write.valid := write_arb.io.out.valid && arrays.patch(i, Nil, 1).map(_.io.write.ready).reduce(_&&_)
}
} else if (nBanks == 1) {
val array = Module(new L1MetadataArrayBank(onReset, nSets))
val arb = Module(new Arbiter(new L1MetaReadReq, nReadPorts))
arb.io.in <> io.read
array.io.read <> arb.io.out
io.resp.foreach(_ <> array.io.resp)
val write_arb = Module(new Arbiter(new L1MetaWriteReq, nWritePorts))
write_arb.io.in <> io.write
array.io.write <> write_arb.io.out
} else {
val bankBits = log2Ceil(nBanks)
val arrays = Seq.fill(nBanks) { Module(new L1MetadataArrayBank(onReset, nSets/nBanks)) }
val arbs = Seq.fill(nBanks) { Module(new Arbiter(new L1MetaReadReq, nReadPorts)) }
val write_arbs = Seq.fill(nBanks) { Module(new Arbiter(new L1MetaWriteReq, nWritePorts)) }
io.read.foreach(_.ready := false.B)
io.resp.foreach(_ := DontCare)
for (i <- 0 until nReadPorts) {
for (b <- 0 until nBanks) {
arbs(b).io.in(i).valid := io.read(i).valid && io.read(i).bits.idx(bankBits-1,0) === b.U
arbs(b).io.in(i).bits := io.read(i).bits
arbs(b).io.in(i).bits.idx := io.read(i).bits.idx >> bankBits
when (io.read(i).bits.idx(bankBits-1,0) === b.U) { io.read(i).ready := arbs(b).io.in(i).ready }
}
io.resp(i) := Mux1H(UIntToOH(RegNext(io.read(i).bits.idx(bankBits-1,0))), arrays.map(_.io.resp))
}
(arbs zip arrays).map { case (r,a) => a.io.read <> r.io.out }
io.write.foreach(_.ready := false.B)
for (i <- 0 until nWritePorts) {
for (b <- 0 until nBanks) {
write_arbs(b).io.in(i).valid := io.write(i).valid && io.write(i).bits.idx(bankBits-1,0) === b.U
write_arbs(b).io.in(i).bits := io.write(i).bits
write_arbs(b).io.in(i).bits.idx := io.write(i).bits.idx >> bankBits
when (io.write(i).bits.idx(bankBits-1,0) === b.U) { io.write(i).ready := write_arbs(b).io.in(i).ready }
}
}
(write_arbs zip arrays).map { case (r,a) => a.io.write <> r.io.out }
}
}
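A small worked example of the bank-selection scheme in the nBanks > 1 branch above (illustrative values: nSets = 64, nBanks = 4, so bankBits = 2):
val bankBits = log2Ceil(4)             // = 2: the low index bits pick the bank
val idx      = "b101011".U(6.W)        // set index 0x2B
val bank     = idx(bankBits - 1, 0)    // = 3, so the request is routed to arrays(3)
val localIdx = idx >> bankBits         // = 0x0A, the index that bank actually sees
// On the response path, Mux1H(UIntToOH(RegNext(bank)), arrays.map(_.io.resp)) returns the
// selected bank's data one cycle later, matching the SyncReadMem read latency.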
| module L1MetadataArrayBank_1( // @[TagArray.scala:12:7]
input clock, // @[TagArray.scala:12:7]
input reset, // @[TagArray.scala:12:7]
output io_read_ready, // @[TagArray.scala:14:14]
input io_read_valid, // @[TagArray.scala:14:14]
input [5:0] io_read_bits_idx, // @[TagArray.scala:14:14]
input [3:0] io_read_bits_way_en, // @[TagArray.scala:14:14]
input [19:0] io_read_bits_tag, // @[TagArray.scala:14:14]
output io_write_ready, // @[TagArray.scala:14:14]
input io_write_valid, // @[TagArray.scala:14:14]
input [5:0] io_write_bits_idx, // @[TagArray.scala:14:14]
input [3:0] io_write_bits_way_en, // @[TagArray.scala:14:14]
input [19:0] io_write_bits_tag, // @[TagArray.scala:14:14]
input [1:0] io_write_bits_data_coh_state, // @[TagArray.scala:14:14]
input [19:0] io_write_bits_data_tag, // @[TagArray.scala:14:14]
output [1:0] io_resp_0_coh_state, // @[TagArray.scala:14:14]
output [19:0] io_resp_0_tag, // @[TagArray.scala:14:14]
output [1:0] io_resp_1_coh_state, // @[TagArray.scala:14:14]
output [19:0] io_resp_1_tag, // @[TagArray.scala:14:14]
output [1:0] io_resp_2_coh_state, // @[TagArray.scala:14:14]
output [19:0] io_resp_2_tag, // @[TagArray.scala:14:14]
output [1:0] io_resp_3_coh_state, // @[TagArray.scala:14:14]
output [19:0] io_resp_3_tag // @[TagArray.scala:14:14]
);
wire [3:0] tag_array_MPORT_1_addr; // @[TagArray.scala:87:28]
wire tag_array_MPORT_1_en; // @[TagArray.scala:87:51]
wire [3:0] tag_array_MPORT_addr; // @[TagArray.scala:85:20]
wire [87:0] _tag_array_RW0_rdata; // @[TagArray.scala:28:30]
wire io_read_valid_0 = io_read_valid; // @[TagArray.scala:12:7]
wire [5:0] io_read_bits_idx_0 = io_read_bits_idx; // @[TagArray.scala:12:7]
wire [3:0] io_read_bits_way_en_0 = io_read_bits_way_en; // @[TagArray.scala:12:7]
wire [19:0] io_read_bits_tag_0 = io_read_bits_tag; // @[TagArray.scala:12:7]
wire io_write_valid_0 = io_write_valid; // @[TagArray.scala:12:7]
wire [5:0] io_write_bits_idx_0 = io_write_bits_idx; // @[TagArray.scala:12:7]
wire [3:0] io_write_bits_way_en_0 = io_write_bits_way_en; // @[TagArray.scala:12:7]
wire [19:0] io_write_bits_tag_0 = io_write_bits_tag; // @[TagArray.scala:12:7]
wire [1:0] io_write_bits_data_coh_state_0 = io_write_bits_data_coh_state; // @[TagArray.scala:12:7]
wire [19:0] io_write_bits_data_tag_0 = io_write_bits_data_tag; // @[TagArray.scala:12:7]
wire [1:0] rstVal_meta_state = 2'h0; // @[Metadata.scala:160:20]
wire [1:0] rstVal_coh_state = 2'h0; // @[HellaCache.scala:305:20]
wire [19:0] rstVal_tag = 20'h0; // @[HellaCache.scala:305:20]
wire [3:0] _wmask_T_1 = 4'hF; // @[TagArray.scala:23:42]
wire [3:0] _rmask_T_1 = 4'hF; // @[TagArray.scala:24:42]
wire [1:0] _stall_read_T = 2'h3; // @[TagArray.scala:33:34]
wire io_read_ready_0; // @[TagArray.scala:12:7]
wire io_write_ready_0; // @[TagArray.scala:12:7]
wire [1:0] io_resp_0_coh_state_0; // @[TagArray.scala:12:7]
wire [19:0] io_resp_0_tag_0; // @[TagArray.scala:12:7]
wire [1:0] io_resp_1_coh_state_0; // @[TagArray.scala:12:7]
wire [19:0] io_resp_1_tag_0; // @[TagArray.scala:12:7]
wire [1:0] io_resp_2_coh_state_0; // @[TagArray.scala:12:7]
wire [19:0] io_resp_2_tag_0; // @[TagArray.scala:12:7]
wire [1:0] io_resp_3_coh_state_0; // @[TagArray.scala:12:7]
wire [19:0] io_resp_3_tag_0; // @[TagArray.scala:12:7]
reg [4:0] rst_cnt; // @[TagArray.scala:19:24]
wire rst = ~(rst_cnt[4]); // @[TagArray.scala:19:24, :20:21]
wire _wmask_T = rst; // @[TagArray.scala:20:21, :23:23]
wire _rmask_T = rst; // @[TagArray.scala:20:21, :24:23]
wire [5:0] _GEN = {1'h0, rst_cnt}; // @[TagArray.scala:19:24, :21:18]
wire [5:0] waddr = rst ? _GEN : io_write_bits_idx_0; // @[TagArray.scala:12:7, :20:21, :21:18]
wire [1:0] _wdata_T_coh_state = rst ? 2'h0 : io_write_bits_data_coh_state_0; // @[TagArray.scala:12:7, :20:21, :22:18]
wire [19:0] _wdata_T_tag = rst ? 20'h0 : io_write_bits_data_tag_0; // @[TagArray.scala:12:7, :20:21, :22:18]
wire [21:0] wdata = {_wdata_T_coh_state, _wdata_T_tag}; // @[TagArray.scala:22:{18,52}]
wire [3:0] _wmask_T_2 = _wmask_T ? 4'hF : io_write_bits_way_en_0; // @[TagArray.scala:12:7, :23:{18,23}]
wire wmask_0 = _wmask_T_2[0]; // @[TagArray.scala:23:{18,81}]
wire wmask_1 = _wmask_T_2[1]; // @[TagArray.scala:23:{18,81}]
wire wmask_2 = _wmask_T_2[2]; // @[TagArray.scala:23:{18,81}]
wire wmask_3 = _wmask_T_2[3]; // @[TagArray.scala:23:{18,81}]
wire [3:0] _rmask_T_2 = _rmask_T ? 4'hF : io_read_bits_way_en_0; // @[TagArray.scala:12:7, :24:{18,23}]
wire rmask_0 = _rmask_T_2[0]; // @[TagArray.scala:24:{18,80}]
wire rmask_1 = _rmask_T_2[1]; // @[TagArray.scala:24:{18,80}]
wire rmask_2 = _rmask_T_2[2]; // @[TagArray.scala:24:{18,80}]
wire rmask_3 = _rmask_T_2[3]; // @[TagArray.scala:24:{18,80}]
wire [5:0] _rst_cnt_T = _GEN + 6'h1; // @[TagArray.scala:21:18, :25:35]
wire [4:0] _rst_cnt_T_1 = _rst_cnt_T[4:0]; // @[TagArray.scala:25:35]
wire wen; // @[TagArray.scala:30:17]
wire ren; // @[TagArray.scala:31:17]
reg [1:0] stall_ctr; // @[TagArray.scala:32:26]
wire stall_read = &stall_ctr; // @[TagArray.scala:32:26, :33:30]
wire force_stall; // @[TagArray.scala:34:29]
reg rbuf_valid; // @[TagArray.scala:36:27]
reg [3:0] rbuf_idx; // @[TagArray.scala:37:21]
reg [1:0] rbuf_0_coh_state; // @[TagArray.scala:38:17]
reg [19:0] rbuf_0_tag; // @[TagArray.scala:38:17]
reg [1:0] rbuf_1_coh_state; // @[TagArray.scala:38:17]
reg [19:0] rbuf_1_tag; // @[TagArray.scala:38:17]
reg [1:0] rbuf_2_coh_state; // @[TagArray.scala:38:17]
reg [19:0] rbuf_2_tag; // @[TagArray.scala:38:17]
reg [1:0] rbuf_3_coh_state; // @[TagArray.scala:38:17]
reg [19:0] rbuf_3_tag; // @[TagArray.scala:38:17]
wire forward_from_rbuf; // @[TagArray.scala:39:35]
wire [2:0] _stall_ctr_T = {1'h0, stall_ctr} + 3'h1; // @[TagArray.scala:32:26, :42:28]
wire [1:0] _stall_ctr_T_1 = _stall_ctr_T[1:0]; // @[TagArray.scala:42:28]
wire _T_2 = io_read_valid_0 & ~stall_read; // @[TagArray.scala:12:7, :33:30, :53:{25,28}]
wire [5:0] _GEN_0 = {2'h0, rbuf_idx}; // @[TagArray.scala:37:21, :54:30]
wire _T_4 = io_read_bits_idx_0 == _GEN_0 & rbuf_valid; // @[TagArray.scala:12:7, :36:27, :54:{30,43}]
assign forward_from_rbuf = ~rst & _T_2 & _T_4; // @[TagArray.scala:20:21, :39:35, :47:14, :53:{25,41}, :54:{43,58}]
assign force_stall = ~rst & _T_2 & ~_T_4 & io_write_valid_0; // @[TagArray.scala:12:7, :20:21, :34:29, :39:35, :47:14, :53:{25,41}, :54:{43,58}, :65:21]
assign ren = ~rst & _T_2 & ~_T_4; // @[TagArray.scala:20:21, :31:17, :39:35, :47:14, :49:9, :53:{25,41}, :54:{43,58}, :56:13, :61:13, :68:11]
wire _GEN_1 = ~_T_2 | _T_4; // @[TagArray.scala:53:{25,41}, :54:{43,58}, :69:11]
assign wen = rst | _GEN_1 & io_write_valid_0; // @[TagArray.scala:12:7, :20:21, :30:17, :47:14, :48:9, :53:41, :54:58, :69:11]
assign io_read_ready_0 = ~rst & _T_2; // @[TagArray.scala:12:7, :20:21, :39:35, :47:14, :50:19, :53:{25,41}]
assign io_write_ready_0 = ~rst & _GEN_1; // @[TagArray.scala:12:7, :20:21, :39:35, :47:14, :51:20, :53:41, :54:58, :69:11]
reg [5:0] s1_read_idx; // @[TagArray.scala:74:30]
reg REG; // @[TagArray.scala:75:16]
assign tag_array_MPORT_addr = waddr[3:0]; // @[TagArray.scala:21:18, :85:20]
assign tag_array_MPORT_1_en = ren & ~wen; // @[TagArray.scala:30:17, :31:17, :87:{51,54}]
assign tag_array_MPORT_1_addr = io_read_bits_idx_0[3:0]; // @[TagArray.scala:12:7, :87:28]
reg REG_1; // @[TagArray.scala:88:16]
reg [1:0] r_0_coh_state; // @[TagArray.scala:89:25]
reg [19:0] r_0_tag; // @[TagArray.scala:89:25]
reg [1:0] r_1_coh_state; // @[TagArray.scala:89:25]
reg [19:0] r_1_tag; // @[TagArray.scala:89:25]
reg [1:0] r_2_coh_state; // @[TagArray.scala:89:25]
reg [19:0] r_2_tag; // @[TagArray.scala:89:25]
reg [1:0] r_3_coh_state; // @[TagArray.scala:89:25]
reg [19:0] r_3_tag; // @[TagArray.scala:89:25]
assign io_resp_0_coh_state_0 = REG_1 ? r_0_coh_state : _tag_array_RW0_rdata[21:20]; // @[TagArray.scala:12:7, :28:30, :87:{11,74}, :88:{16,37}, :89:{13,25}]
assign io_resp_0_tag_0 = REG_1 ? r_0_tag : _tag_array_RW0_rdata[19:0]; // @[TagArray.scala:12:7, :28:30, :87:{11,74}, :88:{16,37}, :89:{13,25}]
assign io_resp_1_coh_state_0 = REG_1 ? r_1_coh_state : _tag_array_RW0_rdata[43:42]; // @[TagArray.scala:12:7, :28:30, :87:{11,74}, :88:{16,37}, :89:{13,25}]
assign io_resp_1_tag_0 = REG_1 ? r_1_tag : _tag_array_RW0_rdata[41:22]; // @[TagArray.scala:12:7, :28:30, :87:{11,74}, :88:{16,37}, :89:{13,25}]
assign io_resp_2_coh_state_0 = REG_1 ? r_2_coh_state : _tag_array_RW0_rdata[65:64]; // @[TagArray.scala:12:7, :28:30, :87:{11,74}, :88:{16,37}, :89:{13,25}]
assign io_resp_2_tag_0 = REG_1 ? r_2_tag : _tag_array_RW0_rdata[63:44]; // @[TagArray.scala:12:7, :28:30, :87:{11,74}, :88:{16,37}, :89:{13,25}]
assign io_resp_3_coh_state_0 = REG_1 ? r_3_coh_state : _tag_array_RW0_rdata[87:86]; // @[TagArray.scala:12:7, :28:30, :87:{11,74}, :88:{16,37}, :89:{13,25}]
assign io_resp_3_tag_0 = REG_1 ? r_3_tag : _tag_array_RW0_rdata[85:66]; // @[TagArray.scala:12:7, :28:30, :87:{11,74}, :88:{16,37}, :89:{13,25}]
wire _T_11 = REG & ~(io_write_valid_0 & io_write_bits_idx_0 == s1_read_idx); // @[TagArray.scala:12:7, :74:30, :75:{16,53,56,73,94}]
always @(posedge clock) begin // @[TagArray.scala:12:7]
if (reset) begin // @[TagArray.scala:12:7]
rst_cnt <= 5'h0; // @[TagArray.scala:19:24]
stall_ctr <= 2'h0; // @[TagArray.scala:32:26]
rbuf_valid <= 1'h0; // @[TagArray.scala:36:27]
end
else begin // @[TagArray.scala:12:7]
if (rst) // @[TagArray.scala:20:21]
rst_cnt <= _rst_cnt_T_1; // @[TagArray.scala:19:24, :25:35]
stall_ctr <= io_read_valid_0 & force_stall ? _stall_ctr_T_1 : 2'h0; // @[TagArray.scala:12:7, :32:26, :34:29, :41:{23,39}, :42:{15,28}, :44:15]
rbuf_valid <= ~(io_write_valid_0 & io_write_bits_idx_0 == _GEN_0) & (_T_11 | rbuf_valid); // @[TagArray.scala:12:7, :36:27, :54:30, :75:{53,112}, :76:16, :80:{24,45,59}, :81:16]
end
if (_T_11) begin // @[TagArray.scala:75:53]
rbuf_idx <= s1_read_idx[3:0]; // @[TagArray.scala:37:21, :74:30, :77:14]
rbuf_0_coh_state <= io_resp_0_coh_state_0; // @[TagArray.scala:12:7, :38:17]
rbuf_0_tag <= io_resp_0_tag_0; // @[TagArray.scala:12:7, :38:17]
rbuf_1_coh_state <= io_resp_1_coh_state_0; // @[TagArray.scala:12:7, :38:17]
rbuf_1_tag <= io_resp_1_tag_0; // @[TagArray.scala:12:7, :38:17]
rbuf_2_coh_state <= io_resp_2_coh_state_0; // @[TagArray.scala:12:7, :38:17]
rbuf_2_tag <= io_resp_2_tag_0; // @[TagArray.scala:12:7, :38:17]
rbuf_3_coh_state <= io_resp_3_coh_state_0; // @[TagArray.scala:12:7, :38:17]
rbuf_3_tag <= io_resp_3_tag_0; // @[TagArray.scala:12:7, :38:17]
end
if (io_read_valid_0) // @[TagArray.scala:12:7]
s1_read_idx <= io_read_bits_idx_0; // @[TagArray.scala:12:7, :74:30]
REG <= io_read_ready_0 & io_read_valid_0 & ~forward_from_rbuf; // @[Decoupled.scala:51:35]
REG_1 <= forward_from_rbuf; // @[TagArray.scala:39:35, :88:16]
if (forward_from_rbuf) begin // @[TagArray.scala:39:35]
r_0_coh_state <= rbuf_0_coh_state; // @[TagArray.scala:38:17, :89:25]
r_0_tag <= rbuf_0_tag; // @[TagArray.scala:38:17, :89:25]
r_1_coh_state <= rbuf_1_coh_state; // @[TagArray.scala:38:17, :89:25]
r_1_tag <= rbuf_1_tag; // @[TagArray.scala:38:17, :89:25]
r_2_coh_state <= rbuf_2_coh_state; // @[TagArray.scala:38:17, :89:25]
r_2_tag <= rbuf_2_tag; // @[TagArray.scala:38:17, :89:25]
r_3_coh_state <= rbuf_3_coh_state; // @[TagArray.scala:38:17, :89:25]
r_3_tag <= rbuf_3_tag; // @[TagArray.scala:38:17, :89:25]
end
end // always @(posedge)
tag_array_1 tag_array ( // @[TagArray.scala:28:30]
.RW0_addr (wen ? tag_array_MPORT_addr : tag_array_MPORT_1_addr), // @[TagArray.scala:28:30, :30:17, :85:20, :87:28]
.RW0_en (tag_array_MPORT_1_en | wen), // @[TagArray.scala:28:30, :30:17, :87:51]
.RW0_clk (clock),
.RW0_wmode (wen), // @[TagArray.scala:30:17]
.RW0_wdata ({4{wdata}}), // @[TagArray.scala:22:52, :28:30]
.RW0_rdata (_tag_array_RW0_rdata),
.RW0_wmask ({wmask_3, wmask_2, wmask_1, wmask_0}) // @[TagArray.scala:23:81, :28:30]
); // @[TagArray.scala:28:30]
assign io_read_ready = io_read_ready_0; // @[TagArray.scala:12:7]
assign io_write_ready = io_write_ready_0; // @[TagArray.scala:12:7]
assign io_resp_0_coh_state = io_resp_0_coh_state_0; // @[TagArray.scala:12:7]
assign io_resp_0_tag = io_resp_0_tag_0; // @[TagArray.scala:12:7]
assign io_resp_1_coh_state = io_resp_1_coh_state_0; // @[TagArray.scala:12:7]
assign io_resp_1_tag = io_resp_1_tag_0; // @[TagArray.scala:12:7]
assign io_resp_2_coh_state = io_resp_2_coh_state_0; // @[TagArray.scala:12:7]
assign io_resp_2_tag = io_resp_2_tag_0; // @[TagArray.scala:12:7]
assign io_resp_3_coh_state = io_resp_3_coh_state_0; // @[TagArray.scala:12:7]
assign io_resp_3_tag = io_resp_3_tag_0; // @[TagArray.scala:12:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Tile.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
import Util._
/**
* A Tile is a purely combinational 2D array of passThrough PEs.
* a, b, d, and the control signals are broadcast across the entire array and are passed through to the Tile's outputs
* @param inputType The data type of each PE's inputs
* @param rows Number of rows of PEs in the Tile
* @param columns Number of columns of PEs in the Tile
*/
class Tile[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, tree_reduction: Boolean, max_simultaneous_matmuls: Int, val rows: Int, val columns: Int)(implicit ev: Arithmetic[T]) extends Module {
val io = IO(new Bundle {
val in_a = Input(Vec(rows, inputType))
val in_b = Input(Vec(columns, outputType)) // This is the output of the tile next to it
val in_d = Input(Vec(columns, outputType))
val in_control = Input(Vec(columns, new PEControl(accType)))
val in_id = Input(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
val in_last = Input(Vec(columns, Bool()))
val out_a = Output(Vec(rows, inputType))
val out_c = Output(Vec(columns, outputType))
val out_b = Output(Vec(columns, outputType))
val out_control = Output(Vec(columns, new PEControl(accType)))
val out_id = Output(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
val out_last = Output(Vec(columns, Bool()))
val in_valid = Input(Vec(columns, Bool()))
val out_valid = Output(Vec(columns, Bool()))
val bad_dataflow = Output(Bool())
})
import ev._
val tile = Seq.fill(rows, columns)(Module(new PE(inputType, outputType, accType, df, max_simultaneous_matmuls)))
val tileT = tile.transpose
// TODO: abstract hori/vert broadcast, all these connections look the same
// Broadcast 'a' horizontally across the Tile
for (r <- 0 until rows) {
tile(r).foldLeft(io.in_a(r)) {
case (in_a, pe) =>
pe.io.in_a := in_a
pe.io.out_a
}
}
// Broadcast 'b' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_b(c)) {
case (in_b, pe) =>
pe.io.in_b := (if (tree_reduction) in_b.zero else in_b)
pe.io.out_b
}
}
// Broadcast 'd' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_d(c)) {
case (in_d, pe) =>
pe.io.in_d := in_d
pe.io.out_c
}
}
// Broadcast 'control' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_control(c)) {
case (in_ctrl, pe) =>
pe.io.in_control := in_ctrl
pe.io.out_control
}
}
// Broadcast 'valid' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_valid(c)) {
case (v, pe) =>
pe.io.in_valid := v
pe.io.out_valid
}
}
// Broadcast 'id' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_id(c)) {
case (id, pe) =>
pe.io.in_id := id
pe.io.out_id
}
}
// Broadcast 'last' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_last(c)) {
case (last, pe) =>
pe.io.in_last := last
pe.io.out_last
}
}
// Drive the Tile's bottom IO
for (c <- 0 until columns) {
io.out_c(c) := tile(rows-1)(c).io.out_c
io.out_control(c) := tile(rows-1)(c).io.out_control
io.out_id(c) := tile(rows-1)(c).io.out_id
io.out_last(c) := tile(rows-1)(c).io.out_last
io.out_valid(c) := tile(rows-1)(c).io.out_valid
io.out_b(c) := {
if (tree_reduction) {
val prods = tileT(c).map(_.io.out_b)
accumulateTree(prods :+ io.in_b(c))
} else {
tile(rows - 1)(c).io.out_b
}
}
}
io.bad_dataflow := tile.map(_.map(_.io.bad_dataflow).reduce(_||_)).reduce(_||_)
// Drive the Tile's right IO
for (r <- 0 until rows) {
io.out_a(r) := tile(r)(columns-1).io.out_a
}
}
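For context, a hedged sketch of instantiating a small Tile outside this file (parameter values are illustrative; the implicit Arithmetic[SInt] instance comes from gemmini's Arithmetic companion object):
val t = Module(new Tile(SInt(8.W), SInt(20.W), SInt(32.W), Dataflow.WS,
  tree_reduction = false, max_simultaneous_matmuls = 5, rows = 2, columns = 2))
t.io.in_a       := VecInit(Seq.fill(2)(0.S(8.W)))    // activations broadcast from the left edge
t.io.in_b       := VecInit(Seq.fill(2)(0.S(20.W)))   // partial sums / outputs from the tile above
t.io.in_d       := VecInit(Seq.fill(2)(0.S(20.W)))
t.io.in_control := DontCare
t.io.in_id      := VecInit(Seq.fill(2)(0.U(3.W)))
t.io.in_last    := VecInit(Seq.fill(2)(false.B))
t.io.in_valid   := VecInit(Seq.fill(2)(true.B))
// t.io.out_c and t.io.out_b expose the bottom row's results combinationally.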
| module Tile_132( // @[Tile.scala:16:7]
input clock, // @[Tile.scala:16:7]
input reset, // @[Tile.scala:16:7]
input [7:0] io_in_a_0, // @[Tile.scala:17:14]
input [19:0] io_in_b_0, // @[Tile.scala:17:14]
input [19:0] io_in_d_0, // @[Tile.scala:17:14]
input io_in_control_0_dataflow, // @[Tile.scala:17:14]
input io_in_control_0_propagate, // @[Tile.scala:17:14]
input [4:0] io_in_control_0_shift, // @[Tile.scala:17:14]
input [2:0] io_in_id_0, // @[Tile.scala:17:14]
input io_in_last_0, // @[Tile.scala:17:14]
output [7:0] io_out_a_0, // @[Tile.scala:17:14]
output [19:0] io_out_c_0, // @[Tile.scala:17:14]
output [19:0] io_out_b_0, // @[Tile.scala:17:14]
output io_out_control_0_dataflow, // @[Tile.scala:17:14]
output io_out_control_0_propagate, // @[Tile.scala:17:14]
output [4:0] io_out_control_0_shift, // @[Tile.scala:17:14]
output [2:0] io_out_id_0, // @[Tile.scala:17:14]
output io_out_last_0, // @[Tile.scala:17:14]
input io_in_valid_0, // @[Tile.scala:17:14]
output io_out_valid_0, // @[Tile.scala:17:14]
output io_bad_dataflow // @[Tile.scala:17:14]
);
wire [7:0] io_in_a_0_0 = io_in_a_0; // @[Tile.scala:16:7]
wire [19:0] io_in_b_0_0 = io_in_b_0; // @[Tile.scala:16:7]
wire [19:0] io_in_d_0_0 = io_in_d_0; // @[Tile.scala:16:7]
wire io_in_control_0_dataflow_0 = io_in_control_0_dataflow; // @[Tile.scala:16:7]
wire io_in_control_0_propagate_0 = io_in_control_0_propagate; // @[Tile.scala:16:7]
wire [4:0] io_in_control_0_shift_0 = io_in_control_0_shift; // @[Tile.scala:16:7]
wire [2:0] io_in_id_0_0 = io_in_id_0; // @[Tile.scala:16:7]
wire io_in_last_0_0 = io_in_last_0; // @[Tile.scala:16:7]
wire io_in_valid_0_0 = io_in_valid_0; // @[Tile.scala:16:7]
wire [7:0] io_out_a_0_0; // @[Tile.scala:16:7]
wire [19:0] io_out_c_0_0; // @[Tile.scala:16:7]
wire [19:0] io_out_b_0_0; // @[Tile.scala:16:7]
wire io_out_control_0_dataflow_0; // @[Tile.scala:16:7]
wire io_out_control_0_propagate_0; // @[Tile.scala:16:7]
wire [4:0] io_out_control_0_shift_0; // @[Tile.scala:16:7]
wire [2:0] io_out_id_0_0; // @[Tile.scala:16:7]
wire io_out_last_0_0; // @[Tile.scala:16:7]
wire io_out_valid_0_0; // @[Tile.scala:16:7]
wire io_bad_dataflow_0; // @[Tile.scala:16:7]
PE_388 tile_0_0 ( // @[Tile.scala:42:44]
.clock (clock),
.reset (reset),
.io_in_a (io_in_a_0_0), // @[Tile.scala:16:7]
.io_in_b (io_in_b_0_0), // @[Tile.scala:16:7]
.io_in_d (io_in_d_0_0), // @[Tile.scala:16:7]
.io_out_a (io_out_a_0_0),
.io_out_b (io_out_b_0_0),
.io_out_c (io_out_c_0_0),
.io_in_control_dataflow (io_in_control_0_dataflow_0), // @[Tile.scala:16:7]
.io_in_control_propagate (io_in_control_0_propagate_0), // @[Tile.scala:16:7]
.io_in_control_shift (io_in_control_0_shift_0), // @[Tile.scala:16:7]
.io_out_control_dataflow (io_out_control_0_dataflow_0),
.io_out_control_propagate (io_out_control_0_propagate_0),
.io_out_control_shift (io_out_control_0_shift_0),
.io_in_id (io_in_id_0_0), // @[Tile.scala:16:7]
.io_out_id (io_out_id_0_0),
.io_in_last (io_in_last_0_0), // @[Tile.scala:16:7]
.io_out_last (io_out_last_0_0),
.io_in_valid (io_in_valid_0_0), // @[Tile.scala:16:7]
.io_out_valid (io_out_valid_0_0),
.io_bad_dataflow (io_bad_dataflow_0)
); // @[Tile.scala:42:44]
assign io_out_a_0 = io_out_a_0_0; // @[Tile.scala:16:7]
assign io_out_c_0 = io_out_c_0_0; // @[Tile.scala:16:7]
assign io_out_b_0 = io_out_b_0_0; // @[Tile.scala:16:7]
assign io_out_control_0_dataflow = io_out_control_0_dataflow_0; // @[Tile.scala:16:7]
assign io_out_control_0_propagate = io_out_control_0_propagate_0; // @[Tile.scala:16:7]
assign io_out_control_0_shift = io_out_control_0_shift_0; // @[Tile.scala:16:7]
assign io_out_id_0 = io_out_id_0_0; // @[Tile.scala:16:7]
assign io_out_last_0 = io_out_last_0_0; // @[Tile.scala:16:7]
assign io_out_valid_0 = io_out_valid_0_0; // @[Tile.scala:16:7]
assign io_bad_dataflow = io_bad_dataflow_0; // @[Tile.scala:16:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address=== address,"'B' channel address changed with multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the " +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the " +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be the same as a_size of the corresponding request " +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding " +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
// This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minLatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
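// inflight_opcodes and inflight_sizes pack one slot per source ID; each slot holds
// (value << 1) | 1, so an all-zero slot unambiguously means "no request outstanding".
// The lookups below shift the packed register down to the slot for d.source, mask off
// one slot's worth of bits, then drop the low "set" bit with the final >> 1.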
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minLatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minLatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
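// Example sketch (illustrative only): DecoupledHelper is typically used to fire a transfer
// only when every participant is ready, with each participant's own handshake derived from
// the others via fire(exclude). The module below is hypothetical and not used elsewhere.
class DecoupledHelperExample extends Module {
  val io = IO(new Bundle {
    val in = Flipped(Decoupled(UInt(8.W)))
    val out = Decoupled(UInt(8.W))
  })
  val helper = DecoupledHelper(io.in.valid, io.out.ready)
  // Exclude each side's own term so its handshake does not depend on itself.
  io.in.ready := helper.fire(io.in.valid)
  io.out.valid := helper.fire(io.out.ready)
  io.out.bits := io.in.bits
}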
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
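// Example sketch (illustrative only): MuxTLookup selects a tuple of values by key, falling
// back to the default when no entry matches; the table below is hypothetical.
object MuxTLookupExample {
  def decode(key: UInt): (Bool, UInt) =
    MuxTLookup(key, (false.B, 0.U(8.W)), Seq(
      1.U -> (true.B, 8.U(8.W)),
      2.U -> (true.B, 16.U(8.W))))
}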
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
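// Example sketch (illustrative only): Str packs ASCII characters into a UInt, most
// significant byte first, e.g. for emitting fixed-width text from hardware.
object StrExample {
  def ok: UInt = Str("OK") // 16-bit value 0x4F4B
}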
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
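// Example sketch (illustrative only): Majority is true when more than half of its inputs
// are set, e.g. for voting among triplicated copies of a control signal.
object MajorityExample {
  def vote(a: Bool, b: Bool, c: Bool): Bool = Majority(Seq(a, b, c))
}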
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
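// Example sketch (illustrative only): MaskGen turns the low address bits of a beat and a
// log2 transfer size into a byte-lane mask, here for a hypothetical 8-byte-wide bus.
object MaskGenExample {
  // addrLo must carry at least log2(beatBytes) = 3 bits of the address.
  def beatMask(addrLo: UInt, lgSize: UInt): UInt = MaskGen(addrLo, lgSize, beatBytes = 8)
}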
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racey.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
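// Example sketch (illustrative only): PlusArg reads a runtime simulation argument, so the
// same elaborated design can be tuned from the simulator command line; the argument name
// "max_inflight" below is hypothetical.
class PlusArgExample extends Module {
  val io = IO(new Bundle { val limit = Output(UInt(32.W)) })
  // Run the simulator with +max_inflight=16 to override the default of 4.
  io.limit := PlusArg("max_inflight", default = 4, docstring = "Maximum in-flight requests")
}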
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
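// Worked example (illustrative only): these conversions run at elaboration time, e.g.
// "RocketTile Core".underscore == "rocket_tile_core" and "Rocket Tile".kebab == "rocket-tile".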
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
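// Worked example (illustrative only): for a 5-entry circular buffer, 4.U.addWrap(1.U, 5)
// wraps to 0.U and 3.U.subWrap(4.U, 5) wraps to 4.U, without needing a divider.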
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
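// Worked example (illustrative only): leftOR("b00100".U(5.W)) yields "b11100" and
// rightOR("b00100".U(5.W)) yields "b00111"; useful for expanding a one-hot bit into a
// contiguous mask toward the high or low end.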
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
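// Worked example (illustrative only): groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2) returns
// Seq(1 -> Seq(1, 3), 0 -> Seq(2, 4)), preserving first-seen key order unlike Seq.groupBy.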
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
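// Example sketch (illustrative only): adResponse/bcResponse map a request opcode to its
// expected response, e.g. adResponse(TLMessages.Get) is AccessAckData and
// bcResponse(TLMessages.Probe) is ProbeAck.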
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on inwards path to) the global point of serialization.
* (B)ranch: the agent is on an outwards path from the point of serialization and may hold only read-only copies of the data.
* (N)one: the agent holds no permissions on the data.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
// Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.diplomacy.{
AddressDecoder, AddressSet, BufferParams, DirectedBuffers, IdMap, IdMapEntry,
IdRange, RegionType, TransferSizes
}
import freechips.rocketchip.resources.{Resource, ResourceAddress, ResourcePermissions}
import freechips.rocketchip.util.{
AsyncQueueParams, BundleField, BundleFieldBase, BundleKeyBase,
CreditedDelay, groupByIntoSeq, RationalDirection, SimpleProduct
}
import scala.math.max
//These transfer sizes describe requests issued from masters on the A channel that will be responded by slaves on the D channel
case class TLMasterToSlaveTransferSizes(
// Supports both Acquire+Release of the following two sizes:
acquireT: TransferSizes = TransferSizes.none,
acquireB: TransferSizes = TransferSizes.none,
arithmetic: TransferSizes = TransferSizes.none,
logical: TransferSizes = TransferSizes.none,
get: TransferSizes = TransferSizes.none,
putFull: TransferSizes = TransferSizes.none,
putPartial: TransferSizes = TransferSizes.none,
hint: TransferSizes = TransferSizes.none)
extends TLCommonTransferSizes {
def intersect(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes(
acquireT = acquireT .intersect(rhs.acquireT),
acquireB = acquireB .intersect(rhs.acquireB),
arithmetic = arithmetic.intersect(rhs.arithmetic),
logical = logical .intersect(rhs.logical),
get = get .intersect(rhs.get),
putFull = putFull .intersect(rhs.putFull),
putPartial = putPartial.intersect(rhs.putPartial),
hint = hint .intersect(rhs.hint))
def mincover(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes(
acquireT = acquireT .mincover(rhs.acquireT),
acquireB = acquireB .mincover(rhs.acquireB),
arithmetic = arithmetic.mincover(rhs.arithmetic),
logical = logical .mincover(rhs.logical),
get = get .mincover(rhs.get),
putFull = putFull .mincover(rhs.putFull),
putPartial = putPartial.mincover(rhs.putPartial),
hint = hint .mincover(rhs.hint))
// Reduce rendering to a simple yes/no per field
override def toString = {
def str(x: TransferSizes, flag: String) = if (x.none) "" else flag
def flags = Vector(
str(acquireT, "T"),
str(acquireB, "B"),
str(arithmetic, "A"),
str(logical, "L"),
str(get, "G"),
str(putFull, "F"),
str(putPartial, "P"),
str(hint, "H"))
flags.mkString
}
// Prints out the actual information in a user readable way
def infoString = {
s"""acquireT = ${acquireT}
|acquireB = ${acquireB}
|arithmetic = ${arithmetic}
|logical = ${logical}
|get = ${get}
|putFull = ${putFull}
|putPartial = ${putPartial}
|hint = ${hint}
|
|""".stripMargin
}
}
object TLMasterToSlaveTransferSizes {
def unknownEmits = TLMasterToSlaveTransferSizes(
acquireT = TransferSizes(1, 4096),
acquireB = TransferSizes(1, 4096),
arithmetic = TransferSizes(1, 4096),
logical = TransferSizes(1, 4096),
get = TransferSizes(1, 4096),
putFull = TransferSizes(1, 4096),
putPartial = TransferSizes(1, 4096),
hint = TransferSizes(1, 4096))
def unknownSupports = TLMasterToSlaveTransferSizes()
}
//These transfer sizes describe requests issued from slaves on the B channel that will be responded by masters on the C channel
case class TLSlaveToMasterTransferSizes(
probe: TransferSizes = TransferSizes.none,
arithmetic: TransferSizes = TransferSizes.none,
logical: TransferSizes = TransferSizes.none,
get: TransferSizes = TransferSizes.none,
putFull: TransferSizes = TransferSizes.none,
putPartial: TransferSizes = TransferSizes.none,
hint: TransferSizes = TransferSizes.none
) extends TLCommonTransferSizes {
def intersect(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes(
probe = probe .intersect(rhs.probe),
arithmetic = arithmetic.intersect(rhs.arithmetic),
logical = logical .intersect(rhs.logical),
get = get .intersect(rhs.get),
putFull = putFull .intersect(rhs.putFull),
putPartial = putPartial.intersect(rhs.putPartial),
hint = hint .intersect(rhs.hint)
)
def mincover(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes(
probe = probe .mincover(rhs.probe),
arithmetic = arithmetic.mincover(rhs.arithmetic),
logical = logical .mincover(rhs.logical),
get = get .mincover(rhs.get),
putFull = putFull .mincover(rhs.putFull),
putPartial = putPartial.mincover(rhs.putPartial),
hint = hint .mincover(rhs.hint)
)
// Reduce rendering to a simple yes/no per field
override def toString = {
def str(x: TransferSizes, flag: String) = if (x.none) "" else flag
def flags = Vector(
str(probe, "P"),
str(arithmetic, "A"),
str(logical, "L"),
str(get, "G"),
str(putFull, "F"),
str(putPartial, "P"),
str(hint, "H"))
flags.mkString
}
// Prints out the actual information in a user readable way
def infoString = {
s"""probe = ${probe}
|arithmetic = ${arithmetic}
|logical = ${logical}
|get = ${get}
|putFull = ${putFull}
|putPartial = ${putPartial}
|hint = ${hint}
|
|""".stripMargin
}
}
object TLSlaveToMasterTransferSizes {
def unknownEmits = TLSlaveToMasterTransferSizes(
arithmetic = TransferSizes(1, 4096),
logical = TransferSizes(1, 4096),
get = TransferSizes(1, 4096),
putFull = TransferSizes(1, 4096),
putPartial = TransferSizes(1, 4096),
hint = TransferSizes(1, 4096),
probe = TransferSizes(1, 4096))
def unknownSupports = TLSlaveToMasterTransferSizes()
}
trait TLCommonTransferSizes {
def arithmetic: TransferSizes
def logical: TransferSizes
def get: TransferSizes
def putFull: TransferSizes
def putPartial: TransferSizes
def hint: TransferSizes
}
class TLSlaveParameters private(
val nodePath: Seq[BaseNode],
val resources: Seq[Resource],
setName: Option[String],
val address: Seq[AddressSet],
val regionType: RegionType.T,
val executable: Boolean,
val fifoId: Option[Int],
val supports: TLMasterToSlaveTransferSizes,
val emits: TLSlaveToMasterTransferSizes,
// By default, slaves are forbidden from issuing 'denied' responses (it prevents Fragmentation)
val alwaysGrantsT: Boolean, // typically only true for CacheCork'd read-write devices; dual: neverReleaseData
// If fifoId=Some, all accesses sent to the same fifoId are executed and ACK'd in FIFO order
// Note: you can only rely on this FIFO behaviour if your TLMasterParameters include requestFifo
val mayDenyGet: Boolean, // applies to: AccessAckData, GrantData
val mayDenyPut: Boolean) // applies to: AccessAck, Grant, HintAck
// ReleaseAck may NEVER be denied
extends SimpleProduct
{
def sortedAddress = address.sorted
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlaveParameters]
override def productPrefix = "TLSlaveParameters"
// We intentionally omit nodePath for equality testing / formatting
def productArity: Int = 11
def productElement(n: Int): Any = n match {
case 0 => name
case 1 => address
case 2 => resources
case 3 => regionType
case 4 => executable
case 5 => fifoId
case 6 => supports
case 7 => emits
case 8 => alwaysGrantsT
case 9 => mayDenyGet
case 10 => mayDenyPut
case _ => throw new IndexOutOfBoundsException(n.toString)
}
def supportsAcquireT: TransferSizes = supports.acquireT
def supportsAcquireB: TransferSizes = supports.acquireB
def supportsArithmetic: TransferSizes = supports.arithmetic
def supportsLogical: TransferSizes = supports.logical
def supportsGet: TransferSizes = supports.get
def supportsPutFull: TransferSizes = supports.putFull
def supportsPutPartial: TransferSizes = supports.putPartial
def supportsHint: TransferSizes = supports.hint
require (!address.isEmpty, "Address cannot be empty")
address.foreach { a => require (a.finite, "Address must be finite") }
address.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") }
require (supportsPutFull.contains(supportsPutPartial), s"PutFull($supportsPutFull) < PutPartial($supportsPutPartial)")
require (supportsPutFull.contains(supportsArithmetic), s"PutFull($supportsPutFull) < Arithmetic($supportsArithmetic)")
require (supportsPutFull.contains(supportsLogical), s"PutFull($supportsPutFull) < Logical($supportsLogical)")
require (supportsGet.contains(supportsArithmetic), s"Get($supportsGet) < Arithmetic($supportsArithmetic)")
require (supportsGet.contains(supportsLogical), s"Get($supportsGet) < Logical($supportsLogical)")
require (supportsAcquireB.contains(supportsAcquireT), s"AcquireB($supportsAcquireB) < AcquireT($supportsAcquireT)")
require (!alwaysGrantsT || supportsAcquireT, s"Must supportAcquireT if promising to always grantT")
// Make sure that the regionType agrees with the capabilities
require (!supportsAcquireB || regionType >= RegionType.UNCACHED) // acquire -> uncached, tracked, cached
require (regionType <= RegionType.UNCACHED || supportsAcquireB) // tracked, cached -> acquire
require (regionType != RegionType.UNCACHED || supportsGet) // uncached -> supportsGet
val name = setName.orElse(nodePath.lastOption.map(_.lazyModule.name)).getOrElse("disconnected")
val maxTransfer = List( // Largest supported transfer of all types
supportsAcquireT.max,
supportsAcquireB.max,
supportsArithmetic.max,
supportsLogical.max,
supportsGet.max,
supportsPutFull.max,
supportsPutPartial.max).max
val maxAddress = address.map(_.max).max
val minAlignment = address.map(_.alignment).min
// The device had better not support a transfer larger than its alignment
require (minAlignment >= maxTransfer, s"Bad $address: minAlignment ($minAlignment) must be >= maxTransfer ($maxTransfer)")
def toResource: ResourceAddress = {
ResourceAddress(address, ResourcePermissions(
r = supportsAcquireB || supportsGet,
w = supportsAcquireT || supportsPutFull,
x = executable,
c = supportsAcquireB,
a = supportsArithmetic && supportsLogical))
}
def findTreeViolation() = nodePath.find {
case _: MixedAdapterNode[_, _, _, _, _, _, _, _] => false
case _: SinkNode[_, _, _, _, _] => false
case node => node.inputs.size != 1
}
def isTree = findTreeViolation() == None
def infoString = {
s"""Slave Name = ${name}
|Slave Address = ${address}
|supports = ${supports.infoString}
|
|""".stripMargin
}
def v1copy(
address: Seq[AddressSet] = address,
resources: Seq[Resource] = resources,
regionType: RegionType.T = regionType,
executable: Boolean = executable,
nodePath: Seq[BaseNode] = nodePath,
supportsAcquireT: TransferSizes = supports.acquireT,
supportsAcquireB: TransferSizes = supports.acquireB,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint,
mayDenyGet: Boolean = mayDenyGet,
mayDenyPut: Boolean = mayDenyPut,
alwaysGrantsT: Boolean = alwaysGrantsT,
fifoId: Option[Int] = fifoId) =
{
new TLSlaveParameters(
setName = setName,
address = address,
resources = resources,
regionType = regionType,
executable = executable,
nodePath = nodePath,
supports = TLMasterToSlaveTransferSizes(
acquireT = supportsAcquireT,
acquireB = supportsAcquireB,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = emits,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut,
alwaysGrantsT = alwaysGrantsT,
fifoId = fifoId)
}
def v2copy(
nodePath: Seq[BaseNode] = nodePath,
resources: Seq[Resource] = resources,
name: Option[String] = setName,
address: Seq[AddressSet] = address,
regionType: RegionType.T = regionType,
executable: Boolean = executable,
fifoId: Option[Int] = fifoId,
supports: TLMasterToSlaveTransferSizes = supports,
emits: TLSlaveToMasterTransferSizes = emits,
alwaysGrantsT: Boolean = alwaysGrantsT,
mayDenyGet: Boolean = mayDenyGet,
mayDenyPut: Boolean = mayDenyPut) =
{
new TLSlaveParameters(
nodePath = nodePath,
resources = resources,
setName = name,
address = address,
regionType = regionType,
executable = executable,
fifoId = fifoId,
supports = supports,
emits = emits,
alwaysGrantsT = alwaysGrantsT,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut)
}
@deprecated("Use v1copy instead of copy","")
def copy(
address: Seq[AddressSet] = address,
resources: Seq[Resource] = resources,
regionType: RegionType.T = regionType,
executable: Boolean = executable,
nodePath: Seq[BaseNode] = nodePath,
supportsAcquireT: TransferSizes = supports.acquireT,
supportsAcquireB: TransferSizes = supports.acquireB,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint,
mayDenyGet: Boolean = mayDenyGet,
mayDenyPut: Boolean = mayDenyPut,
alwaysGrantsT: Boolean = alwaysGrantsT,
fifoId: Option[Int] = fifoId) =
{
v1copy(
address = address,
resources = resources,
regionType = regionType,
executable = executable,
nodePath = nodePath,
supportsAcquireT = supportsAcquireT,
supportsAcquireB = supportsAcquireB,
supportsArithmetic = supportsArithmetic,
supportsLogical = supportsLogical,
supportsGet = supportsGet,
supportsPutFull = supportsPutFull,
supportsPutPartial = supportsPutPartial,
supportsHint = supportsHint,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut,
alwaysGrantsT = alwaysGrantsT,
fifoId = fifoId)
}
}
object TLSlaveParameters {
def v1(
address: Seq[AddressSet],
resources: Seq[Resource] = Seq(),
regionType: RegionType.T = RegionType.GET_EFFECTS,
executable: Boolean = false,
nodePath: Seq[BaseNode] = Seq(),
supportsAcquireT: TransferSizes = TransferSizes.none,
supportsAcquireB: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none,
mayDenyGet: Boolean = false,
mayDenyPut: Boolean = false,
alwaysGrantsT: Boolean = false,
fifoId: Option[Int] = None) =
{
new TLSlaveParameters(
setName = None,
address = address,
resources = resources,
regionType = regionType,
executable = executable,
nodePath = nodePath,
supports = TLMasterToSlaveTransferSizes(
acquireT = supportsAcquireT,
acquireB = supportsAcquireB,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = TLSlaveToMasterTransferSizes.unknownEmits,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut,
alwaysGrantsT = alwaysGrantsT,
fifoId = fifoId)
}
def v2(
address: Seq[AddressSet],
nodePath: Seq[BaseNode] = Seq(),
resources: Seq[Resource] = Seq(),
name: Option[String] = None,
regionType: RegionType.T = RegionType.GET_EFFECTS,
executable: Boolean = false,
fifoId: Option[Int] = None,
supports: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownSupports,
emits: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownEmits,
alwaysGrantsT: Boolean = false,
mayDenyGet: Boolean = false,
mayDenyPut: Boolean = false) =
{
new TLSlaveParameters(
nodePath = nodePath,
resources = resources,
setName = name,
address = address,
regionType = regionType,
executable = executable,
fifoId = fifoId,
supports = supports,
emits = emits,
alwaysGrantsT = alwaysGrantsT,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut)
}
}
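// Illustrative sketch (not part of the upstream library): slave parameters for a
// hypothetical 64 KiB MMIO device that accepts 1- to 8-byte Gets and Puts. The
// address, transfer sizes, and fifoId below are made-up values for illustration.
object TLSlaveParametersExample {
  val mmio = TLSlaveParameters.v1(
    address            = Seq(AddressSet(0x10000, 0xffff)),
    supportsGet        = TransferSizes(1, 8),
    supportsPutFull    = TransferSizes(1, 8),
    supportsPutPartial = TransferSizes(1, 8),
    fifoId             = Some(0)) // request in-order responses from this device
}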
object TLManagerParameters {
@deprecated("Use TLSlaveParameters.v1 instead of TLManagerParameters","")
def apply(
address: Seq[AddressSet],
resources: Seq[Resource] = Seq(),
regionType: RegionType.T = RegionType.GET_EFFECTS,
executable: Boolean = false,
nodePath: Seq[BaseNode] = Seq(),
supportsAcquireT: TransferSizes = TransferSizes.none,
supportsAcquireB: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none,
mayDenyGet: Boolean = false,
mayDenyPut: Boolean = false,
alwaysGrantsT: Boolean = false,
fifoId: Option[Int] = None) =
TLSlaveParameters.v1(
address,
resources,
regionType,
executable,
nodePath,
supportsAcquireT,
supportsAcquireB,
supportsArithmetic,
supportsLogical,
supportsGet,
supportsPutFull,
supportsPutPartial,
supportsHint,
mayDenyGet,
mayDenyPut,
alwaysGrantsT,
fifoId,
)
}
case class TLChannelBeatBytes(a: Option[Int], b: Option[Int], c: Option[Int], d: Option[Int])
{
def members = Seq(a, b, c, d)
members.collect { case Some(beatBytes) =>
require (isPow2(beatBytes), "Data channel width must be a power of 2")
}
}
object TLChannelBeatBytes{
def apply(beatBytes: Int): TLChannelBeatBytes = TLChannelBeatBytes(
Some(beatBytes),
Some(beatBytes),
Some(beatBytes),
Some(beatBytes))
def apply(): TLChannelBeatBytes = TLChannelBeatBytes(
None,
None,
None,
None)
}
class TLSlavePortParameters private(
val slaves: Seq[TLSlaveParameters],
val channelBytes: TLChannelBeatBytes,
val endSinkId: Int,
val minLatency: Int,
val responseFields: Seq[BundleFieldBase],
val requestKeys: Seq[BundleKeyBase]) extends SimpleProduct
{
def sortedSlaves = slaves.sortBy(_.sortedAddress.head)
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlavePortParameters]
override def productPrefix = "TLSlavePortParameters"
def productArity: Int = 6
def productElement(n: Int): Any = n match {
case 0 => slaves
case 1 => channelBytes
case 2 => endSinkId
case 3 => minLatency
case 4 => responseFields
case 5 => requestKeys
case _ => throw new IndexOutOfBoundsException(n.toString)
}
require (!slaves.isEmpty, "Slave ports must have slaves")
require (endSinkId >= 0, "Sink ids cannot be negative")
require (minLatency >= 0, "Minimum required latency cannot be negative")
// Using this API implies you cannot handle mixed-width busses
def beatBytes = {
channelBytes.members.foreach { width =>
require (width.isDefined && width == channelBytes.a)
}
channelBytes.a.get
}
// TODO this should be deprecated
def managers = slaves
def requireFifo(policy: TLFIFOFixer.Policy = TLFIFOFixer.allFIFO) = {
val relevant = slaves.filter(m => policy(m))
relevant.foreach { m =>
require(m.fifoId == relevant.head.fifoId, s"${m.name} had fifoId ${m.fifoId}, which was not homogeneous (${slaves.map(s => (s.name, s.fifoId))}) ")
}
}
// Bounds on required sizes
def maxAddress = slaves.map(_.maxAddress).max
def maxTransfer = slaves.map(_.maxTransfer).max
def mayDenyGet = slaves.exists(_.mayDenyGet)
def mayDenyPut = slaves.exists(_.mayDenyPut)
// Diplomatically determined operation sizes emitted by all outward Slaves
// as opposed to emits* which generate circuitry to check which specific addresses
val allEmitClaims = slaves.map(_.emits).reduce( _ intersect _)
// Operation sizes emitted by at least one outward slave
// as opposed to emits* which generate circuitry to check which specific addresses
val anyEmitClaims = slaves.map(_.emits).reduce(_ mincover _)
// Diplomatically determined operation sizes supported by all outward Slaves
// as opposed to supports* which generate circuitry to check which specific addresses
val allSupportClaims = slaves.map(_.supports).reduce( _ intersect _)
val allSupportAcquireT = allSupportClaims.acquireT
val allSupportAcquireB = allSupportClaims.acquireB
val allSupportArithmetic = allSupportClaims.arithmetic
val allSupportLogical = allSupportClaims.logical
val allSupportGet = allSupportClaims.get
val allSupportPutFull = allSupportClaims.putFull
val allSupportPutPartial = allSupportClaims.putPartial
val allSupportHint = allSupportClaims.hint
// Operation sizes supported by at least one outward slave
// as opposed to supports* which generate circuitry to check which specific addresses
val anySupportClaims = slaves.map(_.supports).reduce(_ mincover _)
val anySupportAcquireT = !anySupportClaims.acquireT.none
val anySupportAcquireB = !anySupportClaims.acquireB.none
val anySupportArithmetic = !anySupportClaims.arithmetic.none
val anySupportLogical = !anySupportClaims.logical.none
val anySupportGet = !anySupportClaims.get.none
val anySupportPutFull = !anySupportClaims.putFull.none
val anySupportPutPartial = !anySupportClaims.putPartial.none
val anySupportHint = !anySupportClaims.hint.none
// Supporting Acquire means being routable for GrantAck
require ((endSinkId == 0) == !anySupportAcquireB)
// These return Option[TLSlaveParameters] for your convenience
def find(address: BigInt) = slaves.find(_.address.exists(_.contains(address)))
// The safe version will check the entire address
def findSafe(address: UInt) = VecInit(sortedSlaves.map(_.address.map(_.contains(address)).reduce(_ || _)))
// The fast version assumes the address is valid (you probably want fastProperty instead of this function)
def findFast(address: UInt) = {
val routingMask = AddressDecoder(slaves.map(_.address))
VecInit(sortedSlaves.map(_.address.map(_.widen(~routingMask)).distinct.map(_.contains(address)).reduce(_ || _)))
}
// Compute the simplest AddressSets that decide a key
def fastPropertyGroup[K](p: TLSlaveParameters => K): Seq[(K, Seq[AddressSet])] = {
val groups = groupByIntoSeq(sortedSlaves.map(m => (p(m), m.address)))( _._1).map { case (k, vs) =>
k -> vs.flatMap(_._2)
}
val reductionMask = AddressDecoder(groups.map(_._2))
groups.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~reductionMask)).distinct) }
}
// Select a property
def fastProperty[K, D <: Data](address: UInt, p: TLSlaveParameters => K, d: K => D): D =
Mux1H(fastPropertyGroup(p).map { case (v, a) => (a.map(_.contains(address)).reduce(_||_), d(v)) })
// Note: returns the actual fifoId + 1 or 0 if None
def findFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.map(_+1).getOrElse(0), (i:Int) => i.U)
def hasFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.isDefined, (b:Boolean) => b.B)
// Does this Port manage this ID/address?
def containsSafe(address: UInt) = findSafe(address).reduce(_ || _)
private def addressHelper(
// setting safe to false indicates that all addresses are expected to be legal, which might reduce circuit complexity
safe: Boolean,
// member filters out the sizes being checked based on the opcode being emitted or supported
member: TLSlaveParameters => TransferSizes,
address: UInt,
lgSize: UInt,
// range provides a limit on the sizes that are expected to be evaluated, which might reduce circuit complexity
range: Option[TransferSizes]): Bool = {
// trim reduces circuit complexity by intersecting checked sizes with the range argument
def trim(x: TransferSizes) = range.map(_.intersect(x)).getOrElse(x)
// groupBy returns an unordered map; groupByIntoSeq converts it back to a Seq and sorts it for determinism.
// We group the slaves by the trimmed TransferSizes they advertise for the selected `member` operation
// (e.g. supports.get or emits.putFull), intersected with the optional `range` argument.
// Rather than a list of slaves, each resulting case maps a transfer size to all of the address sets
// that support (or emit) that size; a single size may cover several disjoint address ranges.
// The safe/fast distinction is a trade-off: the safe version compares against every address bit and
// always gives the right answer, even for an illegal address, while the fast version builds a cheaper
// decoder but presumes the address is legal and may give the wrong answer otherwise.
// A usage sketch of the Safe/Fast query methods follows this class definition.
val supportCases = groupByIntoSeq(slaves)(m => trim(member(m))).map { case (k: TransferSizes, vs: Seq[TLSlaveParameters]) =>
k -> vs.flatMap(_.address)
}
// safe produces a circuit that compares against all possible addresses,
// whereas fast presumes that the address is legal but uses an efficient address decoder
val mask = if (safe) ~BigInt(0) else AddressDecoder(supportCases.map(_._2))
// Simplified creates the most concise possible representation of each cases' address sets based on the mask.
val simplified = supportCases.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~mask)).distinct) }
simplified.map { case (s, a) =>
// For each case: the operation is legal if its size is contained in s (statically true when the
// caller-supplied range equals s) and its address falls within one of the case's address sets.
// The cases are then or-reduced together.
((Some(s) == range).B || s.containsLg(lgSize)) &&
a.map(_.contains(address)).reduce(_||_)
}.foldLeft(false.B)(_||_)
}
def supportsAcquireTSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireT, address, lgSize, range)
def supportsAcquireBSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireB, address, lgSize, range)
def supportsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.arithmetic, address, lgSize, range)
def supportsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.logical, address, lgSize, range)
def supportsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.get, address, lgSize, range)
def supportsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putFull, address, lgSize, range)
def supportsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putPartial, address, lgSize, range)
def supportsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.hint, address, lgSize, range)
def supportsAcquireTFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireT, address, lgSize, range)
def supportsAcquireBFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireB, address, lgSize, range)
def supportsArithmeticFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.arithmetic, address, lgSize, range)
def supportsLogicalFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.logical, address, lgSize, range)
def supportsGetFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.get, address, lgSize, range)
def supportsPutFullFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.putFull, address, lgSize, range)
def supportsPutPartialFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.putPartial, address, lgSize, range)
def supportsHintFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.hint, address, lgSize, range)
def emitsProbeSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.probe, address, lgSize, range)
def emitsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.arithmetic, address, lgSize, range)
def emitsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.logical, address, lgSize, range)
def emitsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.get, address, lgSize, range)
def emitsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putFull, address, lgSize, range)
def emitsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putPartial, address, lgSize, range)
def emitsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.hint, address, lgSize, range)
def findTreeViolation() = slaves.flatMap(_.findTreeViolation()).headOption
def isTree = !slaves.exists(!_.isTree)
def infoString = "Slave Port Beatbytes = " + beatBytes + "\n" + "Slave Port MinLatency = " + minLatency + "\n\n" + slaves.map(_.infoString).mkString
def v1copy(
managers: Seq[TLSlaveParameters] = slaves,
beatBytes: Int = -1,
endSinkId: Int = endSinkId,
minLatency: Int = minLatency,
responseFields: Seq[BundleFieldBase] = responseFields,
requestKeys: Seq[BundleKeyBase] = requestKeys) =
{
new TLSlavePortParameters(
slaves = managers,
channelBytes = if (beatBytes != -1) TLChannelBeatBytes(beatBytes) else channelBytes,
endSinkId = endSinkId,
minLatency = minLatency,
responseFields = responseFields,
requestKeys = requestKeys)
}
def v2copy(
slaves: Seq[TLSlaveParameters] = slaves,
channelBytes: TLChannelBeatBytes = channelBytes,
endSinkId: Int = endSinkId,
minLatency: Int = minLatency,
responseFields: Seq[BundleFieldBase] = responseFields,
requestKeys: Seq[BundleKeyBase] = requestKeys) =
{
new TLSlavePortParameters(
slaves = slaves,
channelBytes = channelBytes,
endSinkId = endSinkId,
minLatency = minLatency,
responseFields = responseFields,
requestKeys = requestKeys)
}
@deprecated("Use v1copy instead of copy","")
def copy(
managers: Seq[TLSlaveParameters] = slaves,
beatBytes: Int = -1,
endSinkId: Int = endSinkId,
minLatency: Int = minLatency,
responseFields: Seq[BundleFieldBase] = responseFields,
requestKeys: Seq[BundleKeyBase] = requestKeys) =
{
v1copy(
managers,
beatBytes,
endSinkId,
minLatency,
responseFields,
requestKeys)
}
}
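// Illustrative usage sketch (not part of the upstream library): a tiny checker that
// evaluates in hardware whether a Get of a given address and size is legal on a slave
// port. The module and its ports are assumptions for illustration; only the
// supportsGetSafe/supportsGetFast queries come from TLSlavePortParameters above.
class GetLegalityChecker(slavePort: TLSlavePortParameters) extends Module {
  val io = IO(new Bundle {
    val address = Input(UInt(log2Up(slavePort.maxAddress + 1).W))
    val lgSize  = Input(UInt(4.W)) // width chosen arbitrarily for the sketch
    val safe    = Output(Bool())   // checks every address bit; valid for any address
    val fast    = Output(Bool())   // cheaper decoder; presumes the address is mapped
  })
  io.safe := slavePort.supportsGetSafe(io.address, io.lgSize)
  io.fast := slavePort.supportsGetFast(io.address, io.lgSize)
}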
object TLSlavePortParameters {
def v1(
managers: Seq[TLSlaveParameters],
beatBytes: Int,
endSinkId: Int = 0,
minLatency: Int = 0,
responseFields: Seq[BundleFieldBase] = Nil,
requestKeys: Seq[BundleKeyBase] = Nil) =
{
new TLSlavePortParameters(
slaves = managers,
channelBytes = TLChannelBeatBytes(beatBytes),
endSinkId = endSinkId,
minLatency = minLatency,
responseFields = responseFields,
requestKeys = requestKeys)
}
}
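// Illustrative sketch (not part of the upstream library): collecting slaves into a
// port with an 8-byte data channel. `mmio` is the hypothetical device defined in
// TLSlaveParametersExample above; beatBytes = 8 is likewise made up.
object TLSlavePortParametersExample {
  val port = TLSlavePortParameters.v1(
    managers  = Seq(TLSlaveParametersExample.mmio),
    beatBytes = 8)
}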
object TLManagerPortParameters {
@deprecated("Use TLSlavePortParameters.v1 instead of TLManagerPortParameters","")
def apply(
managers: Seq[TLSlaveParameters],
beatBytes: Int,
endSinkId: Int = 0,
minLatency: Int = 0,
responseFields: Seq[BundleFieldBase] = Nil,
requestKeys: Seq[BundleKeyBase] = Nil) =
{
TLSlavePortParameters.v1(
managers,
beatBytes,
endSinkId,
minLatency,
responseFields,
requestKeys)
}
}
class TLMasterParameters private(
val nodePath: Seq[BaseNode],
val resources: Seq[Resource],
val name: String,
val visibility: Seq[AddressSet],
val unusedRegionTypes: Set[RegionType.T],
val executesOnly: Boolean,
val requestFifo: Boolean, // only a request, not a requirement. applies to A, not C.
val supports: TLSlaveToMasterTransferSizes,
val emits: TLMasterToSlaveTransferSizes,
val neverReleasesData: Boolean,
val sourceId: IdRange) extends SimpleProduct
{
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterParameters]
override def productPrefix = "TLMasterParameters"
// We intentionally omit nodePath for equality testing / formatting
def productArity: Int = 10
def productElement(n: Int): Any = n match {
case 0 => name
case 1 => sourceId
case 2 => resources
case 3 => visibility
case 4 => unusedRegionTypes
case 5 => executesOnly
case 6 => requestFifo
case 7 => supports
case 8 => emits
case 9 => neverReleasesData
case _ => throw new IndexOutOfBoundsException(n.toString)
}
require (!sourceId.isEmpty)
require (!visibility.isEmpty)
require (supports.putFull.contains(supports.putPartial))
// We only support these operations if we support Probe (i.e., we're a cache)
require (supports.probe.contains(supports.arithmetic))
require (supports.probe.contains(supports.logical))
require (supports.probe.contains(supports.get))
require (supports.probe.contains(supports.putFull))
require (supports.probe.contains(supports.putPartial))
require (supports.probe.contains(supports.hint))
visibility.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") }
val maxTransfer = List(
supports.probe.max,
supports.arithmetic.max,
supports.logical.max,
supports.get.max,
supports.putFull.max,
supports.putPartial.max).max
def infoString = {
s"""Master Name = ${name}
|visibility = ${visibility}
|emits = ${emits.infoString}
|sourceId = ${sourceId}
|
|""".stripMargin
}
def v1copy(
name: String = name,
sourceId: IdRange = sourceId,
nodePath: Seq[BaseNode] = nodePath,
requestFifo: Boolean = requestFifo,
visibility: Seq[AddressSet] = visibility,
supportsProbe: TransferSizes = supports.probe,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = this.resources,
name = name,
visibility = visibility,
unusedRegionTypes = this.unusedRegionTypes,
executesOnly = this.executesOnly,
requestFifo = requestFifo,
supports = TLSlaveToMasterTransferSizes(
probe = supportsProbe,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = this.emits,
neverReleasesData = this.neverReleasesData,
sourceId = sourceId)
}
def v2copy(
nodePath: Seq[BaseNode] = nodePath,
resources: Seq[Resource] = resources,
name: String = name,
visibility: Seq[AddressSet] = visibility,
unusedRegionTypes: Set[RegionType.T] = unusedRegionTypes,
executesOnly: Boolean = executesOnly,
requestFifo: Boolean = requestFifo,
supports: TLSlaveToMasterTransferSizes = supports,
emits: TLMasterToSlaveTransferSizes = emits,
neverReleasesData: Boolean = neverReleasesData,
sourceId: IdRange = sourceId) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = resources,
name = name,
visibility = visibility,
unusedRegionTypes = unusedRegionTypes,
executesOnly = executesOnly,
requestFifo = requestFifo,
supports = supports,
emits = emits,
neverReleasesData = neverReleasesData,
sourceId = sourceId)
}
@deprecated("Use v1copy instead of copy","")
def copy(
name: String = name,
sourceId: IdRange = sourceId,
nodePath: Seq[BaseNode] = nodePath,
requestFifo: Boolean = requestFifo,
visibility: Seq[AddressSet] = visibility,
supportsProbe: TransferSizes = supports.probe,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint) =
{
v1copy(
name = name,
sourceId = sourceId,
nodePath = nodePath,
requestFifo = requestFifo,
visibility = visibility,
supportsProbe = supportsProbe,
supportsArithmetic = supportsArithmetic,
supportsLogical = supportsLogical,
supportsGet = supportsGet,
supportsPutFull = supportsPutFull,
supportsPutPartial = supportsPutPartial,
supportsHint = supportsHint)
}
}
object TLMasterParameters {
def v1(
name: String,
sourceId: IdRange = IdRange(0,1),
nodePath: Seq[BaseNode] = Seq(),
requestFifo: Boolean = false,
visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)),
supportsProbe: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = Nil,
name = name,
visibility = visibility,
unusedRegionTypes = Set(),
executesOnly = false,
requestFifo = requestFifo,
supports = TLSlaveToMasterTransferSizes(
probe = supportsProbe,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = TLMasterToSlaveTransferSizes.unknownEmits,
neverReleasesData = false,
sourceId = sourceId)
}
def v2(
nodePath: Seq[BaseNode] = Seq(),
resources: Seq[Resource] = Nil,
name: String,
visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)),
unusedRegionTypes: Set[RegionType.T] = Set(),
executesOnly: Boolean = false,
requestFifo: Boolean = false,
supports: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownSupports,
emits: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownEmits,
neverReleasesData: Boolean = false,
sourceId: IdRange = IdRange(0,1)) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = resources,
name = name,
visibility = visibility,
unusedRegionTypes = unusedRegionTypes,
executesOnly = executesOnly,
requestFifo = requestFifo,
supports = supports,
emits = emits,
neverReleasesData = neverReleasesData,
sourceId = sourceId)
}
}
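// Illustrative sketch (not part of the upstream library): a simple DMA-style client
// with four outstanding source IDs. It supports no inbound operations (no Probe), so
// it is not treated as a cache. The name and ID range are made up for illustration.
object TLMasterParametersExample {
  val dma = TLMasterParameters.v1(
    name        = "example-dma",
    sourceId    = IdRange(0, 4),
    requestFifo = true) // a request for FIFO ordering, honoured only by FIFO-capable slaves
}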
object TLClientParameters {
@deprecated("Use TLMasterParameters.v1 instead of TLClientParameters","")
def apply(
name: String,
sourceId: IdRange = IdRange(0,1),
nodePath: Seq[BaseNode] = Seq(),
requestFifo: Boolean = false,
visibility: Seq[AddressSet] = Seq(AddressSet.everything),
supportsProbe: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none) =
{
TLMasterParameters.v1(
name = name,
sourceId = sourceId,
nodePath = nodePath,
requestFifo = requestFifo,
visibility = visibility,
supportsProbe = supportsProbe,
supportsArithmetic = supportsArithmetic,
supportsLogical = supportsLogical,
supportsGet = supportsGet,
supportsPutFull = supportsPutFull,
supportsPutPartial = supportsPutPartial,
supportsHint = supportsHint)
}
}
class TLMasterPortParameters private(
val masters: Seq[TLMasterParameters],
val channelBytes: TLChannelBeatBytes,
val minLatency: Int,
val echoFields: Seq[BundleFieldBase],
val requestFields: Seq[BundleFieldBase],
val responseKeys: Seq[BundleKeyBase]) extends SimpleProduct
{
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterPortParameters]
override def productPrefix = "TLMasterPortParameters"
def productArity: Int = 6
def productElement(n: Int): Any = n match {
case 0 => masters
case 1 => channelBytes
case 2 => minLatency
case 3 => echoFields
case 4 => requestFields
case 5 => responseKeys
case _ => throw new IndexOutOfBoundsException(n.toString)
}
require (!masters.isEmpty)
require (minLatency >= 0)
def clients = masters
// Require disjoint ranges for Ids
IdRange.overlaps(masters.map(_.sourceId)).foreach { case (x, y) =>
require (!x.overlaps(y), s"TLClientParameters.sourceId ${x} overlaps ${y}")
}
// Bounds on required sizes
def endSourceId = masters.map(_.sourceId.end).max
def maxTransfer = masters.map(_.maxTransfer).max
// The unused sources < endSourceId
def unusedSources: Seq[Int] = {
val usedSources = masters.map(_.sourceId).sortBy(_.start)
((Seq(0) ++ usedSources.map(_.end)) zip usedSources.map(_.start)) flatMap { case (end, start) =>
end until start
}
}
// Diplomatically determined operation sizes emitted by all inward Masters
// as opposed to emits* which generate circuitry to check which specific addresses
val allEmitClaims = masters.map(_.emits).reduce( _ intersect _)
// Diplomatically determined operation sizes emitted by at least one inward master
// as opposed to emits* which generate circuitry to check which specific addresses
val anyEmitClaims = masters.map(_.emits).reduce(_ mincover _)
// Diplomatically determined operation sizes supported by all inward Masters
// as opposed to supports* which generate circuitry to check which specific addresses
val allSupportProbe = masters.map(_.supports.probe) .reduce(_ intersect _)
val allSupportArithmetic = masters.map(_.supports.arithmetic).reduce(_ intersect _)
val allSupportLogical = masters.map(_.supports.logical) .reduce(_ intersect _)
val allSupportGet = masters.map(_.supports.get) .reduce(_ intersect _)
val allSupportPutFull = masters.map(_.supports.putFull) .reduce(_ intersect _)
val allSupportPutPartial = masters.map(_.supports.putPartial).reduce(_ intersect _)
val allSupportHint = masters.map(_.supports.hint) .reduce(_ intersect _)
// Diplomatically determined operation sizes supported by at least one master
// as opposed to supports* which generate circuitry to check which specific addresses
val anySupportProbe = masters.map(!_.supports.probe.none) .reduce(_ || _)
val anySupportArithmetic = masters.map(!_.supports.arithmetic.none).reduce(_ || _)
val anySupportLogical = masters.map(!_.supports.logical.none) .reduce(_ || _)
val anySupportGet = masters.map(!_.supports.get.none) .reduce(_ || _)
val anySupportPutFull = masters.map(!_.supports.putFull.none) .reduce(_ || _)
val anySupportPutPartial = masters.map(!_.supports.putPartial.none).reduce(_ || _)
val anySupportHint = masters.map(!_.supports.hint.none) .reduce(_ || _)
// These return Option[TLMasterParameters] for your convenience
def find(id: Int) = masters.find(_.sourceId.contains(id))
// Synthesizable lookup methods
def find(id: UInt) = VecInit(masters.map(_.sourceId.contains(id)))
def contains(id: UInt) = find(id).reduce(_ || _)
def requestFifo(id: UInt) = Mux1H(find(id), masters.map(c => c.requestFifo.B))
// Available during RTL runtime, checks to see if (id, size) is supported by the master's (client's) diplomatic parameters
private def sourceIdHelper(member: TLMasterParameters => TransferSizes)(id: UInt, lgSize: UInt) = {
val allSame = masters.map(member(_) == member(masters(0))).reduce(_ && _)
// This if statement is a coarse special case of the groupBy used in sourceIdHelper2:
// it handles the case where every master falls into a single group.
if (allSame) member(masters(0)).containsLg(lgSize) else {
// Find the master associated with ID and returns whether that particular master is able to receive transaction of lgSize
Mux1H(find(id), masters.map(member(_).containsLg(lgSize)))
}
}
// Check for support of a given operation at a specific id
val supportsProbe = sourceIdHelper(_.supports.probe) _
val supportsArithmetic = sourceIdHelper(_.supports.arithmetic) _
val supportsLogical = sourceIdHelper(_.supports.logical) _
val supportsGet = sourceIdHelper(_.supports.get) _
val supportsPutFull = sourceIdHelper(_.supports.putFull) _
val supportsPutPartial = sourceIdHelper(_.supports.putPartial) _
val supportsHint = sourceIdHelper(_.supports.hint) _
// TODO: Merge sourceIdHelper2 with sourceIdHelper
private def sourceIdHelper2(
member: TLMasterParameters => TransferSizes,
sourceId: UInt,
lgSize: UInt): Bool = {
// Because sourceIds are uniquely owned by each master, we use them to group the
// cases that have to be checked.
val emitCases = groupByIntoSeq(masters)(m => member(m)).map { case (k, vs) =>
k -> vs.map(_.sourceId)
}
emitCases.map { case (s, a) =>
(s.containsLg(lgSize)) &&
a.map(_.contains(sourceId)).reduce(_||_)
}.foldLeft(false.B)(_||_)
}
// Check for emit of a given operation at a specific id
def emitsAcquireT (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireT, sourceId, lgSize)
def emitsAcquireB (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireB, sourceId, lgSize)
def emitsArithmetic(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.arithmetic, sourceId, lgSize)
def emitsLogical (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.logical, sourceId, lgSize)
def emitsGet (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.get, sourceId, lgSize)
def emitsPutFull (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putFull, sourceId, lgSize)
def emitsPutPartial(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putPartial, sourceId, lgSize)
def emitsHint (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.hint, sourceId, lgSize)
def infoString = masters.map(_.infoString).mkString
def v1copy(
clients: Seq[TLMasterParameters] = masters,
minLatency: Int = minLatency,
echoFields: Seq[BundleFieldBase] = echoFields,
requestFields: Seq[BundleFieldBase] = requestFields,
responseKeys: Seq[BundleKeyBase] = responseKeys) =
{
new TLMasterPortParameters(
masters = clients,
channelBytes = channelBytes,
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
def v2copy(
masters: Seq[TLMasterParameters] = masters,
channelBytes: TLChannelBeatBytes = channelBytes,
minLatency: Int = minLatency,
echoFields: Seq[BundleFieldBase] = echoFields,
requestFields: Seq[BundleFieldBase] = requestFields,
responseKeys: Seq[BundleKeyBase] = responseKeys) =
{
new TLMasterPortParameters(
masters = masters,
channelBytes = channelBytes,
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
@deprecated("Use v1copy instead of copy","")
def copy(
clients: Seq[TLMasterParameters] = masters,
minLatency: Int = minLatency,
echoFields: Seq[BundleFieldBase] = echoFields,
requestFields: Seq[BundleFieldBase] = requestFields,
responseKeys: Seq[BundleKeyBase] = responseKeys) =
{
v1copy(
clients,
minLatency,
echoFields,
requestFields,
responseKeys)
}
}
object TLClientPortParameters {
@deprecated("Use TLMasterPortParameters.v1 instead of TLClientPortParameters","")
def apply(
clients: Seq[TLMasterParameters],
minLatency: Int = 0,
echoFields: Seq[BundleFieldBase] = Nil,
requestFields: Seq[BundleFieldBase] = Nil,
responseKeys: Seq[BundleKeyBase] = Nil) =
{
TLMasterPortParameters.v1(
clients,
minLatency,
echoFields,
requestFields,
responseKeys)
}
}
object TLMasterPortParameters {
def v1(
clients: Seq[TLMasterParameters],
minLatency: Int = 0,
echoFields: Seq[BundleFieldBase] = Nil,
requestFields: Seq[BundleFieldBase] = Nil,
responseKeys: Seq[BundleKeyBase] = Nil) =
{
new TLMasterPortParameters(
masters = clients,
channelBytes = TLChannelBeatBytes(),
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
def v2(
masters: Seq[TLMasterParameters],
channelBytes: TLChannelBeatBytes = TLChannelBeatBytes(),
minLatency: Int = 0,
echoFields: Seq[BundleFieldBase] = Nil,
requestFields: Seq[BundleFieldBase] = Nil,
responseKeys: Seq[BundleKeyBase] = Nil) =
{
new TLMasterPortParameters(
masters = masters,
channelBytes = channelBytes,
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
}
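// Illustrative sketch (not part of the upstream library): a client port containing
// only the hypothetical DMA master defined in TLMasterParametersExample above.
object TLMasterPortParametersExample {
  val port = TLMasterPortParameters.v1(
    clients = Seq(TLMasterParametersExample.dma))
}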
case class TLBundleParameters(
addressBits: Int,
dataBits: Int,
sourceBits: Int,
sinkBits: Int,
sizeBits: Int,
echoFields: Seq[BundleFieldBase],
requestFields: Seq[BundleFieldBase],
responseFields: Seq[BundleFieldBase],
hasBCE: Boolean)
{
// Chisel has issues with 0-width wires
require (addressBits >= 1)
require (dataBits >= 8)
require (sourceBits >= 1)
require (sinkBits >= 1)
require (sizeBits >= 1)
require (isPow2(dataBits))
echoFields.foreach { f => require (f.key.isControl, s"${f} is not a legal echo field") }
val addrLoBits = log2Up(dataBits/8)
// Used to uniquify bus IP names
def shortName = s"a${addressBits}d${dataBits}s${sourceBits}k${sinkBits}z${sizeBits}" + (if (hasBCE) "c" else "u")
def union(x: TLBundleParameters) =
TLBundleParameters(
max(addressBits, x.addressBits),
max(dataBits, x.dataBits),
max(sourceBits, x.sourceBits),
max(sinkBits, x.sinkBits),
max(sizeBits, x.sizeBits),
echoFields = BundleField.union(echoFields ++ x.echoFields),
requestFields = BundleField.union(requestFields ++ x.requestFields),
responseFields = BundleField.union(responseFields ++ x.responseFields),
hasBCE || x.hasBCE)
}
object TLBundleParameters
{
val emptyBundleParams = TLBundleParameters(
addressBits = 1,
dataBits = 8,
sourceBits = 1,
sinkBits = 1,
sizeBits = 1,
echoFields = Nil,
requestFields = Nil,
responseFields = Nil,
hasBCE = false)
def union(x: Seq[TLBundleParameters]) = x.foldLeft(emptyBundleParams)((x,y) => x.union(y))
def apply(master: TLMasterPortParameters, slave: TLSlavePortParameters) =
new TLBundleParameters(
addressBits = log2Up(slave.maxAddress + 1),
dataBits = slave.beatBytes * 8,
sourceBits = log2Up(master.endSourceId),
sinkBits = log2Up(slave.endSinkId),
sizeBits = log2Up(log2Ceil(max(master.maxTransfer, slave.maxTransfer))+1),
echoFields = master.echoFields,
requestFields = BundleField.accept(master.requestFields, slave.requestKeys),
responseFields = BundleField.accept(slave.responseFields, master.responseKeys),
hasBCE = master.anySupportProbe && slave.anySupportAcquireB)
}
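// Illustrative sketch (not part of the upstream library): deriving link wire widths
// from the hypothetical ports built in the sketches above. For those made-up
// parameters this works out to addressBits = 17, dataBits = 64, sourceBits = 2,
// sinkBits = 1, sizeBits = 2, and hasBCE = false (neither side supports
// Probe/Acquire, so the B, C, and E channels are omitted).
object TLBundleParametersExample {
  val bundle = TLBundleParameters(
    TLMasterPortParametersExample.port,
    TLSlavePortParametersExample.port)
}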
case class TLEdgeParameters(
master: TLMasterPortParameters,
slave: TLSlavePortParameters,
params: Parameters,
sourceInfo: SourceInfo) extends FormatEdge
{
// legacy names:
def manager = slave
def client = master
val maxTransfer = max(master.maxTransfer, slave.maxTransfer)
val maxLgSize = log2Ceil(maxTransfer)
// Sanity check the link...
require (maxTransfer >= slave.beatBytes, s"Link's max transfer (${maxTransfer}) < ${slave.slaves.map(_.name)}'s beatBytes (${slave.beatBytes})")
def diplomaticClaimsMasterToSlave = master.anyEmitClaims.intersect(slave.anySupportClaims)
val bundle = TLBundleParameters(master, slave)
def formatEdge = master.infoString + "\n" + slave.infoString
}
case class TLCreditedDelay(
a: CreditedDelay,
b: CreditedDelay,
c: CreditedDelay,
d: CreditedDelay,
e: CreditedDelay)
{
def + (that: TLCreditedDelay): TLCreditedDelay = TLCreditedDelay(
a = a + that.a,
b = b + that.b,
c = c + that.c,
d = d + that.d,
e = e + that.e)
override def toString = s"(${a}, ${b}, ${c}, ${d}, ${e})"
}
object TLCreditedDelay {
def apply(delay: CreditedDelay): TLCreditedDelay = apply(delay, delay.flip, delay, delay.flip, delay)
}
case class TLCreditedManagerPortParameters(delay: TLCreditedDelay, base: TLSlavePortParameters) {def infoString = base.infoString}
case class TLCreditedClientPortParameters(delay: TLCreditedDelay, base: TLMasterPortParameters) {def infoString = base.infoString}
case class TLCreditedEdgeParameters(client: TLCreditedClientPortParameters, manager: TLCreditedManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge
{
val delay = client.delay + manager.delay
val bundle = TLBundleParameters(client.base, manager.base)
def formatEdge = client.infoString + "\n" + manager.infoString
}
case class TLAsyncManagerPortParameters(async: AsyncQueueParams, base: TLSlavePortParameters) {def infoString = base.infoString}
case class TLAsyncClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString}
case class TLAsyncBundleParameters(async: AsyncQueueParams, base: TLBundleParameters)
case class TLAsyncEdgeParameters(client: TLAsyncClientPortParameters, manager: TLAsyncManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge
{
val bundle = TLAsyncBundleParameters(manager.async, TLBundleParameters(client.base, manager.base))
def formatEdge = client.infoString + "\n" + manager.infoString
}
case class TLRationalManagerPortParameters(direction: RationalDirection, base: TLSlavePortParameters) {def infoString = base.infoString}
case class TLRationalClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString}
case class TLRationalEdgeParameters(client: TLRationalClientPortParameters, manager: TLRationalManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge
{
val bundle = TLBundleParameters(client.base, manager.base)
def formatEdge = client.infoString + "\n" + manager.infoString
}
// To be unified, devices must agree on all of these terms
case class ManagerUnificationKey(
resources: Seq[Resource],
regionType: RegionType.T,
executable: Boolean,
supportsAcquireT: TransferSizes,
supportsAcquireB: TransferSizes,
supportsArithmetic: TransferSizes,
supportsLogical: TransferSizes,
supportsGet: TransferSizes,
supportsPutFull: TransferSizes,
supportsPutPartial: TransferSizes,
supportsHint: TransferSizes)
object ManagerUnificationKey
{
def apply(x: TLSlaveParameters): ManagerUnificationKey = ManagerUnificationKey(
resources = x.resources,
regionType = x.regionType,
executable = x.executable,
supportsAcquireT = x.supportsAcquireT,
supportsAcquireB = x.supportsAcquireB,
supportsArithmetic = x.supportsArithmetic,
supportsLogical = x.supportsLogical,
supportsGet = x.supportsGet,
supportsPutFull = x.supportsPutFull,
supportsPutPartial = x.supportsPutPartial,
supportsHint = x.supportsHint)
}
object ManagerUnification
{
def apply(slaves: Seq[TLSlaveParameters]): List[TLSlaveParameters] = {
slaves.groupBy(ManagerUnificationKey.apply).values.map { seq =>
val agree = seq.forall(_.fifoId == seq.head.fifoId)
seq(0).v1copy(
address = AddressSet.unify(seq.flatMap(_.address)),
fifoId = if (agree) seq(0).fifoId else None)
}.toList
}
}
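// Illustrative sketch (not part of the upstream library): two slaves that differ only
// in their address range unify into a single TLSlaveParameters whose address list
// covers both regions (here Seq(AddressSet(0x0, 0x1fff))). The helper and its values
// are made up for illustration.
object ManagerUnificationExample {
  def ramAt(base: BigInt) = TLSlaveParameters.v1(
    address            = Seq(AddressSet(base, 0xfff)),
    regionType         = RegionType.UNCACHED,
    supportsGet        = TransferSizes(1, 8),
    supportsPutFull    = TransferSizes(1, 8),
    supportsPutPartial = TransferSizes(1, 8))
  val unified = ManagerUnification(Seq(ramAt(0x0000), ramAt(0x1000)))
}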
case class TLBufferParams(
a: BufferParams = BufferParams.none,
b: BufferParams = BufferParams.none,
c: BufferParams = BufferParams.none,
d: BufferParams = BufferParams.none,
e: BufferParams = BufferParams.none
) extends DirectedBuffers[TLBufferParams] {
def copyIn(x: BufferParams) = this.copy(b = x, d = x)
def copyOut(x: BufferParams) = this.copy(a = x, c = x, e = x)
def copyInOut(x: BufferParams) = this.copyIn(x).copyOut(x)
}
/** Pretty printing of TL source id maps */
class TLSourceIdMap(tl: TLMasterPortParameters) extends IdMap[TLSourceIdMapEntry] {
private val tlDigits = String.valueOf(tl.endSourceId-1).length()
protected val fmt = s"\t[%${tlDigits}d, %${tlDigits}d) %s%s%s"
private val sorted = tl.masters.sortBy(_.sourceId)
val mapping: Seq[TLSourceIdMapEntry] = sorted.map { case c =>
TLSourceIdMapEntry(c.sourceId, c.name, c.supports.probe, c.requestFifo)
}
}
case class TLSourceIdMapEntry(tlId: IdRange, name: String, isCache: Boolean, requestFifo: Boolean)
extends IdMapEntry
{
val from = tlId
val to = tlId
val maxTransactionsInFlight = Some(tlId.size)
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
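  // In short: writing accesses (Put*, Arithmetic, Logical), PREFETCH_WRITE hints, and
  // Acquires that grow to T permissions (NtoT, BtoT) need T; Gets, PREFETCH_READ
  // hints, and NtoB Acquires do not. Any other opcode/param combination falls through
  // to the DontCare default above.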
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
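  // The flight register above is sized to hold up to 3*client.endSourceId concurrent
  // transactions; the per-channel first/last tracking plus the PopCounts over up to
  // five channels are what make this circuit expensive, hence the warning above.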
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
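  // Minimal usage sketch (names outside this file, such as `edge`, `addr`, and
  // `lgCacheBlockBytes`, are placeholders):
  //   val (legal, acquire) = edge.AcquireBlock(fromSource = 0.U, toAddress = addr,
  //     lgSize = lgCacheBlockBytes.U, growPermissions = TLPermissions.NtoT)
  //   // drive `acquire` onto the A channel only when `legal` holds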
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
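  // Minimal usage sketch for an uncached read (placeholder names `edge`, `srcId`,
  // `addr`):
  //   val (legal, get) = edge.Get(fromSource = srcId, toAddress = addr, lgSize = 3.U)
  // The manager responds with AccessAckData on channel D carrying 2^lgSize bytes.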
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
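  // Minimal usage sketch for a full-width write (placeholder names `edge`, `srcId`,
  // `addr`, `wdata`); the byte mask is derived from the address and size:
  //   val (legal, put) = edge.Put(fromSource = srcId, toAddress = addr,
  //     lgSize = 3.U, data = wdata)
  // A PutPartialData with an explicit mask is produced by the masked overload below.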
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
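// TLEdgeOut constructs the client-driven channels (A requests, C responses, E acks);
// TLEdgeIn below mirrors it for the manager side, constructing B requests and D
// responses against the same client/manager parameter pair.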
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLMonitor_46( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [3:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input io_in_d_bits_source, // @[Monitor.scala:20:14]
input io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
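  // Note: this monitor instance only observes the A and D channels; the _c_* and
  // probe-related wires declared below are tied off to constants, consistent with an
  // edge that carries no B/C/E traffic.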
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [3:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [31:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7]
wire [3:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_a_bits_source = 1'h0; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt = 1'h0; // @[Monitor.scala:36:7]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire c_set = 1'h0; // @[Monitor.scala:738:34]
wire c_set_wo_ready = 1'h0; // @[Monitor.scala:739:34]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [8:0] c_first_beats1_decode = 9'h0; // @[Edges.scala:220:59]
wire [8:0] c_first_beats1 = 9'h0; // @[Edges.scala:221:14]
wire [8:0] _c_first_count_T = 9'h0; // @[Edges.scala:234:27]
wire [8:0] c_first_count = 9'h0; // @[Edges.scala:234:25]
wire [8:0] _c_first_counter_T = 9'h0; // @[Edges.scala:236:21]
wire _source_ok_T = 1'h1; // @[Parameters.scala:46:9]
wire _source_ok_WIRE_0 = 1'h1; // @[Parameters.scala:1138:31]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire [8:0] c_first_counter1 = 9'h1FF; // @[Edges.scala:230:28]
wire [9:0] _c_first_counter1_T = 10'h3FF; // @[Edges.scala:230:28]
wire [2:0] io_in_a_bits_param = 3'h0; // @[Monitor.scala:36:7]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_first_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_first_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_first_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_first_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_set_wo_ready_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_set_wo_ready_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_opcodes_set_interm_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_opcodes_set_interm_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_sizes_set_interm_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_sizes_set_interm_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_opcodes_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_opcodes_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_sizes_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_sizes_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_probe_ack_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_probe_ack_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_probe_ack_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_probe_ack_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _same_cycle_resp_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _same_cycle_resp_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _same_cycle_resp_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _same_cycle_resp_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _same_cycle_resp_WIRE_4_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _same_cycle_resp_WIRE_5_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [3:0] _a_opcodes_set_T = 4'h0; // @[Monitor.scala:659:79]
wire [3:0] _a_sizes_set_T = 4'h0; // @[Monitor.scala:660:77]
wire [3:0] _c_first_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_first_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_first_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_first_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] c_opcodes_set = 4'h0; // @[Monitor.scala:740:34]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] _c_set_wo_ready_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_set_wo_ready_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_opcodes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_opcodes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_sizes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_opcodes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_opcodes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_opcodes_set_T = 4'h0; // @[Monitor.scala:767:79]
wire [3:0] _c_sizes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_sizes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_sizes_set_T = 4'h0; // @[Monitor.scala:768:77]
wire [3:0] _c_probe_ack_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_probe_ack_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_probe_ack_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_probe_ack_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _same_cycle_resp_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _same_cycle_resp_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _same_cycle_resp_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _same_cycle_resp_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _same_cycle_resp_WIRE_4_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _same_cycle_resp_WIRE_5_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hFF; // @[Monitor.scala:612:57]
wire [15:0] _c_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hFF; // @[Monitor.scala:724:57]
wire [16:0] _a_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hFF; // @[Monitor.scala:612:57]
wire [16:0] _c_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hFF; // @[Monitor.scala:724:57]
wire [15:0] _a_size_lookup_T_3 = 16'h100; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h100; // @[Monitor.scala:612:51]
wire [15:0] _c_size_lookup_T_3 = 16'h100; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h100; // @[Monitor.scala:724:51]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [19:0] _c_sizes_set_T_1 = 20'h0; // @[Monitor.scala:768:52]
wire [18:0] _c_opcodes_set_T_1 = 19'h0; // @[Monitor.scala:767:54]
wire [4:0] _c_sizes_set_interm_T_1 = 5'h1; // @[Monitor.scala:766:59]
wire [4:0] c_sizes_set_interm = 5'h0; // @[Monitor.scala:755:40]
wire [4:0] _c_sizes_set_interm_T = 5'h0; // @[Monitor.scala:766:51]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [1:0] _a_set_wo_ready_T = 2'h1; // @[OneHot.scala:58:35]
wire [1:0] _a_set_T = 2'h1; // @[OneHot.scala:58:35]
wire [1:0] _c_set_wo_ready_T = 2'h1; // @[OneHot.scala:58:35]
wire [1:0] _c_set_T = 2'h1; // @[OneHot.scala:58:35]
wire [7:0] c_sizes_set = 8'h0; // @[Monitor.scala:741:34]
wire [11:0] _c_first_beats1_decode_T_2 = 12'h0; // @[package.scala:243:46]
wire [11:0] _c_first_beats1_decode_T_1 = 12'hFFF; // @[package.scala:243:76]
wire [26:0] _c_first_beats1_decode_T = 27'hFFF; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [3:0] _a_size_lookup_T_2 = 4'h8; // @[Monitor.scala:641:117]
wire [3:0] _d_sizes_clr_T = 4'h8; // @[Monitor.scala:681:48]
wire [3:0] _c_size_lookup_T_2 = 4'h8; // @[Monitor.scala:750:119]
wire [3:0] _d_sizes_clr_T_6 = 4'h8; // @[Monitor.scala:791:48]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [26:0] _GEN = 27'hFFF << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [26:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [26:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [26:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [11:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [31:0] _is_aligned_T = {20'h0, io_in_a_bits_address_0[11:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 32'h0; // @[Edges.scala:21:{16,24}]
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 4'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
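  // The mask_* tree above recomputes the expected byte-lane mask for an 8-byte beat
  // from io_in_a_bits_address and io_in_a_bits_size; the monitor's legality
  // assertions (further down, not shown here) check it against io_in_a_bits_mask.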
wire _source_ok_T_1 = ~io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_0 = _source_ok_T_1; // @[Parameters.scala:1138:31]
wire _T_1081 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_1081; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_1081; // @[Decoupled.scala:51:35]
wire [11:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [8:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [8:0] a_first_counter; // @[Edges.scala:229:27]
wire [9:0] _a_first_counter1_T = {1'h0, a_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] a_first_counter1 = _a_first_counter1_T[8:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [8:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [3:0] size; // @[Monitor.scala:389:22]
reg [31:0] address; // @[Monitor.scala:391:22]
wire _T_1154 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_1154; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_1154; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_1154; // @[Decoupled.scala:51:35]
wire [26:0] _GEN_0 = 27'hFFF << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [11:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [8:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T = {1'h0, d_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1 = _d_first_counter1_T[8:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [3:0] size_1; // @[Monitor.scala:540:22]
reg source_1; // @[Monitor.scala:541:22]
reg sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
reg [1:0] inflight; // @[Monitor.scala:614:27]
reg [3:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [7:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [11:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [8:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [8:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [8:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [9:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] a_first_counter1_1 = _a_first_counter1_T_1[8:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [8:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [8:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [11:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46]
wire [8:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1_1 = _d_first_counter1_T_1[8:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire a_set; // @[Monitor.scala:626:34]
wire a_set_wo_ready; // @[Monitor.scala:627:34]
wire [3:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [7:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [3:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [3:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [3:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [3:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [3:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [3:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [15:0] _a_opcode_lookup_T_6 = {12'h0, _a_opcode_lookup_T_1}; // @[Monitor.scala:637:{44,97}]
wire [15:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[15:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [7:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [3:0] _GEN_2 = {io_in_d_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :641:65]
wire [3:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_2; // @[Monitor.scala:641:65]
wire [3:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_2; // @[Monitor.scala:641:65, :681:99]
wire [3:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_2; // @[Monitor.scala:641:65, :750:67]
wire [3:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_2; // @[Monitor.scala:641:65, :791:99]
wire [7:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [15:0] _a_size_lookup_T_6 = {8'h0, _a_size_lookup_T_1}; // @[Monitor.scala:641:{40,91}]
wire [15:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[15:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[7:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [4:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _T_1004 = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26]
assign a_set_wo_ready = _T_1004; // @[Monitor.scala:627:34, :651:26]
wire _same_cycle_resp_T; // @[Monitor.scala:684:44]
assign _same_cycle_resp_T = _T_1004; // @[Monitor.scala:651:26, :684:44]
assign a_set = _T_1081 & a_first_1; // @[Decoupled.scala:51:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = a_set ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:626:34, :646:40, :655:70, :657:{28,61}]
wire [4:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [4:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[4:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = a_set ? _a_sizes_set_interm_T_1 : 5'h0; // @[Monitor.scala:626:34, :648:38, :655:70, :658:{28,59}]
wire [18:0] _a_opcodes_set_T_1 = {15'h0, a_opcodes_set_interm}; // @[Monitor.scala:646:40, :659:54]
assign a_opcodes_set = a_set ? _a_opcodes_set_T_1[3:0] : 4'h0; // @[Monitor.scala:626:34, :630:33, :655:70, :659:{28,54}]
wire [19:0] _a_sizes_set_T_1 = {15'h0, a_sizes_set_interm}; // @[Monitor.scala:648:38, :660:52]
assign a_sizes_set = a_set ? _a_sizes_set_T_1[7:0] : 8'h0; // @[Monitor.scala:626:34, :632:31, :655:70, :660:{28,52}]
wire d_clr; // @[Monitor.scala:664:34]
wire d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [3:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [7:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_3 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_3; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_3; // @[Monitor.scala:673:46, :783:46]
wire _T_1053 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [1:0] _GEN_4 = {1'h0, io_in_d_bits_source_0}; // @[OneHot.scala:58:35]
wire [1:0] _GEN_5 = 2'h1 << _GEN_4; // @[OneHot.scala:58:35]
wire [1:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [1:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [1:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [1:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_1053 & ~d_release_ack & _d_clr_wo_ready_T[0]; // @[OneHot.scala:58:35]
wire _T_1022 = _T_1154 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_1022 & _d_clr_T[0]; // @[OneHot.scala:58:35]
wire [30:0] _d_opcodes_clr_T_5 = 31'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_1022 ? _d_opcodes_clr_T_5[3:0] : 4'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [30:0] _d_sizes_clr_T_5 = 31'hFF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_1022 ? _d_sizes_clr_T_5[7:0] : 8'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = ~io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [1:0] _inflight_T = {inflight[1], inflight[0] | a_set}; // @[Monitor.scala:614:27, :626:34, :705:27]
wire _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [1:0] _inflight_T_2 = {1'h0, _inflight_T[0] & _inflight_T_1}; // @[Monitor.scala:705:{27,36,38}]
wire [3:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [3:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [3:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [7:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [7:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [7:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [1:0] inflight_1; // @[Monitor.scala:726:35]
wire [1:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [3:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [3:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [7:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [7:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [11:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[11:3]; // @[package.scala:243:46]
wire [8:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1_2 = _d_first_counter1_T_2[8:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [7:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [3:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [15:0] _c_opcode_lookup_T_6 = {12'h0, _c_opcode_lookup_T_1}; // @[Monitor.scala:749:{44,97}]
wire [15:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[15:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [7:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [15:0] _c_size_lookup_T_6 = {8'h0, _c_size_lookup_T_1}; // @[Monitor.scala:750:{42,93}]
wire [15:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[15:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[7:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire d_clr_1; // @[Monitor.scala:774:34]
wire d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [3:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [7:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_1125 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_1125 & d_release_ack_1 & _d_clr_wo_ready_T_1[0]; // @[OneHot.scala:58:35]
wire _T_1107 = _T_1154 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_1107 & _d_clr_T_1[0]; // @[OneHot.scala:58:35]
wire [30:0] _d_opcodes_clr_T_11 = 31'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_1107 ? _d_opcodes_clr_T_11[3:0] : 4'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [30:0] _d_sizes_clr_T_11 = 31'hFF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_1107 ? _d_sizes_clr_T_11[7:0] : 8'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = ~io_in_d_bits_source_0; // @[Monitor.scala:36:7, :795:113]
wire _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [1:0] _inflight_T_5 = {1'h0, _inflight_T_3[0] & _inflight_T_4}; // @[Monitor.scala:814:{35,44,46}]
wire [3:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [3:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [7:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [7:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File Crossing.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.interrupts
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.util.{SynchronizerShiftReg, AsyncResetReg}
@deprecated("IntXing does not ensure interrupt source is glitch free. Use IntSyncSource and IntSyncSink", "rocket-chip 1.2")
class IntXing(sync: Int = 3)(implicit p: Parameters) extends LazyModule
{
val intnode = IntAdapterNode()
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(intnode.in zip intnode.out) foreach { case ((in, _), (out, _)) =>
out := SynchronizerShiftReg(in, sync)
}
}
}
object IntSyncCrossingSource
{
def apply(alreadyRegistered: Boolean = false)(implicit p: Parameters) =
{
val intsource = LazyModule(new IntSyncCrossingSource(alreadyRegistered))
intsource.node
}
}
class IntSyncCrossingSource(alreadyRegistered: Boolean = false)(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSourceNode(alreadyRegistered)
lazy val module = if (alreadyRegistered) (new ImplRegistered) else (new Impl)
class Impl extends LazyModuleImp(this) {
def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0)
override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.sync := AsyncResetReg(Cat(in.reverse)).asBools
}
}
class ImplRegistered extends LazyRawModuleImp(this) {
def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0)
override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}_Registered"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.sync := in
}
}
}
object IntSyncCrossingSink
{
@deprecated("IntSyncCrossingSink which used the `sync` parameter to determine crossing type is deprecated. Use IntSyncAsyncCrossingSink, IntSyncRationalCrossingSink, or IntSyncSyncCrossingSink instead for > 1, 1, and 0 sync values respectively", "rocket-chip 1.2")
def apply(sync: Int = 3)(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync))
intsink.node
}
}
class IntSyncAsyncCrossingSink(sync: Int = 3)(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSinkNode(sync)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
override def desiredName = s"IntSyncAsyncCrossingSink_n${node.out.size}x${node.out.head._1.size}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out := SynchronizerShiftReg(in.sync, sync)
}
}
}
object IntSyncAsyncCrossingSink
{
def apply(sync: Int = 3)(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync))
intsink.node
}
}
class IntSyncSyncCrossingSink()(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSinkNode(0)
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
def outSize = node.out.headOption.map(_._1.size).getOrElse(0)
override def desiredName = s"IntSyncSyncCrossingSink_n${node.out.size}x${outSize}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out := in.sync
}
}
}
object IntSyncSyncCrossingSink
{
def apply()(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncSyncCrossingSink())
intsink.node
}
}
class IntSyncRationalCrossingSink()(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSinkNode(1)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def outSize = node.out.headOption.map(_._1.size).getOrElse(0)
override def desiredName = s"IntSyncRationalCrossingSink_n${node.out.size}x${outSize}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out := RegNext(in.sync)
}
}
}
object IntSyncRationalCrossingSink
{
def apply()(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncRationalCrossingSink())
intsink.node
}
}
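// Illustrative sketch (hypothetical module; all names are assumptions): the source/sink pair
// above is normally split across two clock domains, but is shown in one LazyModule here only to
// make the diplomatic node chaining explicit.
class IntXingViaSyncPair(sync: Int = 3)(implicit p: Parameters) extends LazyModule
{
  val source = LazyModule(new IntSyncCrossingSource(alreadyRegistered = false))
  val sink   = LazyModule(new IntSyncAsyncCrossingSink(sync))
  // register the interrupt in the source domain, then synchronize it in the sink domain
  sink.node := source.node
  // producers bind into source.node; consumers bind from sink.node
  lazy val module = new Impl
  class Impl extends LazyModuleImp(this) { }
}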
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** Instantiate this [[LazyModule]], returning its [[AutoBundle]] and any unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
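/** Illustrative sketch (hypothetical module; the name is an assumption): the usual pairing of a
  * [[LazyModule]] with its [[LazyModuleImp]]. Diplomatic nodes are declared in the outer lazy
  * class, and their negotiated bundles are only touched inside the Impl.
  */
class ExampleLazyBlock(implicit p: Parameters) extends LazyModule {
  lazy val module = new Impl
  class Impl extends LazyModuleImp(this) {
    // node.in / node.out of any nodes declared in the outer class would be wired here
  }
}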
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
* nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package; all nodes are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
* Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
* Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
* the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
* Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
* It should extend from [[chisel3.Data]], which represents the real hardware.
* @tparam DO
* Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
* describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
* Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
* Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
* interface. It should extend from [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
* - line `─`: the source is processed by a function and the result is passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
/** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
* connections which need to be resolved to determine how many actual edges they correspond to. We also
* need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
* edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
// Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need to access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
File AsyncResetReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
/** This black-boxes an Async Reset
* (or Set)
* Register.
*
* Because Chisel doesn't support
* parameterized black boxes,
* we unfortunately have to
* instantiate a number of these.
*
* We also have to hard-code the set/
* reset behavior.
*
* Do not confuse an asynchronous
* reset signal with an asynchronously
* reset reg. You should still
* properly synchronize your reset
* deassertion.
*
* @param d Data input
* @param q Data Output
* @param clk Clock Input
* @param rst Reset Input
* @param en Write Enable Input
*
*/
class AsyncResetReg(resetValue: Int = 0) extends RawModule {
val io = IO(new Bundle {
val d = Input(Bool())
val q = Output(Bool())
val en = Input(Bool())
val clk = Input(Clock())
val rst = Input(Reset())
})
val reg = withClockAndReset(io.clk, io.rst.asAsyncReset)(RegInit(resetValue.U(1.W)))
when (io.en) {
reg := io.d
}
io.q := reg
}
class SimpleRegIO(val w: Int) extends Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
}
class AsyncResetRegVec(val w: Int, val init: BigInt) extends Module {
override def desiredName = s"AsyncResetRegVec_w${w}_i${init}"
val io = IO(new SimpleRegIO(w))
val reg = withReset(reset.asAsyncReset)(RegInit(init.U(w.W)))
when (io.en) {
reg := io.d
}
io.q := reg
}
object AsyncResetReg {
// Create Single Registers
def apply(d: Bool, clk: Clock, rst: Bool, init: Boolean, name: Option[String]): Bool = {
val reg = Module(new AsyncResetReg(if (init) 1 else 0))
reg.io.d := d
reg.io.clk := clk
reg.io.rst := rst
reg.io.en := true.B
name.foreach(reg.suggestName(_))
reg.io.q
}
def apply(d: Bool, clk: Clock, rst: Bool): Bool = apply(d, clk, rst, false, None)
def apply(d: Bool, clk: Clock, rst: Bool, name: String): Bool = apply(d, clk, rst, false, Some(name))
// Create Vectors of Registers
def apply(updateData: UInt, resetData: BigInt, enable: Bool, name: Option[String] = None): UInt = {
val w = updateData.getWidth max resetData.bitLength
val reg = Module(new AsyncResetRegVec(w, resetData))
name.foreach(reg.suggestName(_))
reg.io.d := updateData
reg.io.en := enable
reg.io.q
}
def apply(updateData: UInt, resetData: BigInt, enable: Bool, name: String): UInt = apply(updateData,
resetData, enable, Some(name))
def apply(updateData: UInt, resetData: BigInt): UInt = apply(updateData, resetData, enable = true.B)
def apply(updateData: UInt, resetData: BigInt, name: String): UInt = apply(updateData, resetData, enable = true.B, Some(name))
def apply(updateData: UInt, enable: Bool): UInt = apply(updateData, resetData=BigInt(0), enable)
def apply(updateData: UInt, enable: Bool, name: String): UInt = apply(updateData, resetData = BigInt(0), enable, Some(name))
def apply(updateData: UInt): UInt = apply(updateData, resetData = BigInt(0), enable = true.B)
def apply(updateData: UInt, name:String): UInt = apply(updateData, resetData = BigInt(0), enable = true.B, Some(name))
}
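// Illustrative sketch (hypothetical module; the name is an assumption): registering a value on an
// asynchronously-reset flop via the companion object, the same pattern IntSyncCrossingSource uses
// above with `AsyncResetReg(Cat(in.reverse)).asBools`.
class AsyncResetRegExample extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(4.W))
    val q = Output(UInt(4.W))
  })
  // 4-bit register with reset value 0; the module's reset is treated as asynchronous
  io.q := AsyncResetReg(io.d, resetData = BigInt(0), enable = true.B)
}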
| module IntSyncCrossingSource_n1x1_56( // @[Crossing.scala:41:9]
input clock, // @[Crossing.scala:41:9]
input reset, // @[Crossing.scala:41:9]
input auto_in_0, // @[LazyModuleImp.scala:107:25]
output auto_out_sync_0 // @[LazyModuleImp.scala:107:25]
);
wire auto_in_0_0 = auto_in_0; // @[Crossing.scala:41:9]
wire nodeIn_0 = auto_in_0_0; // @[Crossing.scala:41:9]
wire nodeOut_sync_0; // @[MixedNode.scala:542:17]
wire auto_out_sync_0_0; // @[Crossing.scala:41:9]
assign auto_out_sync_0_0 = nodeOut_sync_0; // @[Crossing.scala:41:9]
AsyncResetRegVec_w1_i0_56 reg_0 ( // @[AsyncResetReg.scala:86:21]
.clock (clock),
.reset (reset),
.io_d (nodeIn_0), // @[MixedNode.scala:551:17]
.io_q (nodeOut_sync_0)
); // @[AsyncResetReg.scala:86:21]
assign auto_out_sync_0 = auto_out_sync_0_0; // @[Crossing.scala:41:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Protocol.scala:
package constellation.protocol
import chisel3._
import chisel3.util._
import constellation.channel._
import constellation.noc._
import constellation.router.{RouterCtrlBundle}
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.subsystem._
import scala.collection.immutable.{ListMap}
// BEGIN: NodeMapping
case class DiplomaticNetworkNodeMapping(
inNodeMapping: ListMap[String, Int] = ListMap[String, Int](),
outNodeMapping: ListMap[String, Int] = ListMap[String, Int]()
) {
// END: NodeMapping
def genUniqueName(all: Seq[Seq[String]]) = {
all.zipWithIndex.map { case (strs, i) =>
val matches = all.take(i).map(_.mkString).count(_ == strs.mkString)
strs.map(s => s"${s}[${matches}]").mkString(",") + "|"
}
}
def getNode(l: String, nodeMapping: ListMap[String, Int]): Option[Int] = {
val keys = nodeMapping.keys.toSeq
val matches = keys.map(k => l.contains(k))
if (matches.filter(i => i).size == 1) {
val index = matches.indexWhere(i => i)
Some(nodeMapping.values.toSeq(index))
} else {
None
}
}
def getNodes(ls: Seq[String], mapping: ListMap[String, Int]): Seq[Option[Int]] = {
ls.map(l => getNode(l, mapping))
}
def getNodesIn(ls: Seq[String]): Seq[Option[Int]] = getNodes(ls, inNodeMapping)
def getNodesOut(ls: Seq[String]): Seq[Option[Int]] = getNodes(ls, outNodeMapping)
}
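// Illustrative sketch (hypothetical values; keys and node indices are assumptions): keys are
// substrings matched against diplomatic edge names, and a name resolves to a physical NoC node
// only when exactly one key matches it.
object ExampleNodeMapping {
  val mapping = DiplomaticNetworkNodeMapping(
    inNodeMapping  = ListMap("Core 0" -> 0, "Core 1" -> 1),
    outNodeMapping = ListMap("ram" -> 2, "pbus" -> 3)
  )
  // e.g. mapping.getNode("Core 0,fetch", mapping.inNodeMapping) returns Some(0)
  def ingressNodes(edgeNames: Seq[String]): Seq[Option[Int]] = mapping.getNodesIn(edgeNames)
}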
// BEGIN: ProtocolParams
trait ProtocolParams {
val minPayloadWidth: Int
val ingressNodes: Seq[Int]
val egressNodes: Seq[Int]
val nVirtualNetworks: Int
val vNetBlocking: (Int, Int) => Boolean
val flows: Seq[FlowParams]
def genIO()(implicit p: Parameters): Data
def interface(
terminals: NoCTerminalIO,
ingressOffset: Int,
egressOffset: Int,
protocol: Data)(implicit p: Parameters)
}
// END: ProtocolParams
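// Illustrative sketch (hypothetical protocol; every value here is an assumption): a minimal
// ProtocolParams implementation with one 8-bit ingress at NoC node 0 feeding one egress at node 1
// on a single virtual network. The interface body is left as a stub where flit packing would go.
case class PassthroughProtocolParams() extends ProtocolParams {
  val minPayloadWidth = 8
  val ingressNodes = Seq(0)
  val egressNodes = Seq(1)
  val nVirtualNetworks = 1
  val vNetBlocking = (_: Int, _: Int) => false
  val flows = Seq(FlowParams(ingressId = 0, egressId = 0, vNetId = 0))
  def genIO()(implicit p: Parameters): Data = new Bundle {
    val in  = Flipped(Decoupled(UInt(8.W)))
    val out = Decoupled(UInt(8.W))
  }
  def interface(terminals: NoCTerminalIO, ingressOffset: Int, egressOffset: Int,
    protocol: Data)(implicit p: Parameters) = {
    // convert `protocol.in` into ingress flits and egress flits back into `protocol.out` here
  }
}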
// BEGIN: ProtocolNoC
case class ProtocolNoCParams(
nocParams: NoCParams,
protocolParams: Seq[ProtocolParams],
widthDivision: Int = 1,
inlineNoC: Boolean = false
)
class ProtocolNoC(params: ProtocolNoCParams)(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val ctrl = if (params.nocParams.hasCtrl) Vec(params.nocParams.topology.nNodes, new RouterCtrlBundle) else Nil
val protocol = MixedVec(params.protocolParams.map { u => u.genIO() })
})
// END: ProtocolNoC
if (params.inlineNoC) chisel3.experimental.annotate(
new chisel3.experimental.ChiselAnnotation {
def toFirrtl: firrtl.annotations.Annotation = firrtl.passes.InlineAnnotation(toNamed)
}
)
val protocolParams = params.protocolParams
val minPayloadWidth = protocolParams.map(_.minPayloadWidth).max
val nocPayloadWidth = math.ceil(minPayloadWidth.toDouble / params.widthDivision).toInt
val terminalPayloadWidth = nocPayloadWidth * params.widthDivision
val ingressOffsets = protocolParams.map(_.ingressNodes.size).scanLeft(0)(_+_)
val egressOffsets = protocolParams.map(_.egressNodes.size).scanLeft(0)(_+_)
val vNetOffsets = protocolParams.map(_.nVirtualNetworks).scanLeft(0)(_+_)
val nocParams = params.nocParams.copy(
ingresses = protocolParams.map(_.ingressNodes).flatten.map(i =>
UserIngressParams(i, payloadBits=terminalPayloadWidth)),
egresses = protocolParams.map(_.egressNodes).flatten.map(i =>
UserEgressParams(i, payloadBits=terminalPayloadWidth)),
routerParams = (i) => params.nocParams.routerParams(i).copy(payloadBits=nocPayloadWidth),
vNetBlocking = (blocker, blockee) => {
def protocolId(i: Int) = vNetOffsets.drop(1).indexWhere(_ > i)
if (protocolId(blocker) == protocolId(blockee)) {
protocolParams(protocolId(blocker)).vNetBlocking(
blocker - vNetOffsets(protocolId(blocker)),
blockee - vNetOffsets(protocolId(blockee))
)
} else {
true
}
},
flows = protocolParams.zipWithIndex.map { case (u,i) =>
u.flows.map(f => f.copy(
ingressId = f.ingressId + ingressOffsets(i),
egressId = f.egressId + egressOffsets(i),
vNetId = f.vNetId + vNetOffsets(i)
))
}.flatten
)
val noc = Module(LazyModule(new NoC(nocParams)).module)
noc.io.router_clocks.foreach(_.clock := clock)
noc.io.router_clocks.foreach(_.reset := reset)
(noc.io.router_ctrl zip io.ctrl).foreach { case (l, r) => l <> r }
(protocolParams zip io.protocol).zipWithIndex.foreach { case ((u, io), x) =>
val terminals = Wire(new NoCTerminalIO(
noc.io.ingressParams.drop(ingressOffsets(x)).take(u.ingressNodes.size),
noc.io.egressParams .drop(egressOffsets(x)) .take(u.egressNodes.size)
))
(terminals.ingress zip noc.io.ingress.drop(ingressOffsets(x))).map { case (l,r) => l <> r }
(terminals.egress zip noc.io.egress.drop (egressOffsets(x))).map { case (l,r) => l <> r }
u.interface(
terminals,
ingressOffsets(x),
egressOffsets(x),
io)
}
}
File Tilelink.scala:
package constellation.protocol
import chisel3._
import chisel3.util._
import constellation.channel._
import constellation.noc._
import constellation.soc.{CanAttachToGlobalNoC}
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.util._
import freechips.rocketchip.tilelink._
import scala.collection.immutable.{ListMap}
trait TLFieldHelper {
def getBodyFields(b: TLChannel): Seq[Data] = b match {
case b: TLBundleA => Seq(b.mask, b.data, b.corrupt)
case b: TLBundleB => Seq(b.mask, b.data, b.corrupt)
case b: TLBundleC => Seq( b.data, b.corrupt)
case b: TLBundleD => Seq( b.data, b.corrupt)
case b: TLBundleE => Seq()
}
def getConstFields(b: TLChannel): Seq[Data] = b match {
case b: TLBundleA => Seq(b.opcode, b.param, b.size, b.source, b.address, b.user, b.echo )
case b: TLBundleB => Seq(b.opcode, b.param, b.size, b.source, b.address )
case b: TLBundleC => Seq(b.opcode, b.param, b.size, b.source, b.address, b.user, b.echo )
case b: TLBundleD => Seq(b.opcode, b.param, b.size, b.source, b.user, b.echo, b.sink, b.denied)
case b: TLBundleE => Seq( b.sink )
}
def minTLPayloadWidth(b: TLChannel): Int = Seq(getBodyFields(b), getConstFields(b)).map(_.map(_.getWidth).sum).max
def minTLPayloadWidth(bs: Seq[TLChannel]): Int = bs.map(b => minTLPayloadWidth(b)).max
def minTLPayloadWidth(b: TLBundle): Int = minTLPayloadWidth(Seq(b.a, b.b, b.c, b.d, b.e).map(_.bits))
}
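// Worked example (a sketch, assuming the 64-bit-data / 32-bit-address bundle
// used in the instance below): the B channel's body fields mask ++ data ++
// corrupt total 8 + 64 + 1 = 73 bits, which dominates its constant fields, so
// minTLPayloadWidth for a B/E-only bundle comes out to 73 -- matching the
// 73-bit flit payloads in the generated Verilog.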
class TLMasterToNoC(
edgeIn: TLEdge, edgesOut: Seq[TLEdge],
sourceStart: Int, sourceSize: Int,
wideBundle: TLBundleParameters,
slaveToEgressOffset: Int => Int,
flitWidth: Int
)(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val tilelink = Flipped(new TLBundle(wideBundle))
val flits = new Bundle {
val a = Decoupled(new IngressFlit(flitWidth))
val b = Flipped(Decoupled(new EgressFlit(flitWidth)))
val c = Decoupled(new IngressFlit(flitWidth))
val d = Flipped(Decoupled(new EgressFlit(flitWidth)))
val e = Decoupled(new IngressFlit(flitWidth))
}
})
val a = Module(new TLAToNoC(edgeIn, edgesOut, wideBundle, (i) => slaveToEgressOffset(i) + 0, sourceStart))
val b = Module(new TLBFromNoC(edgeIn, wideBundle, sourceSize))
val c = Module(new TLCToNoC(edgeIn, edgesOut, wideBundle, (i) => slaveToEgressOffset(i) + 1, sourceStart))
val d = Module(new TLDFromNoC(edgeIn, wideBundle, sourceSize))
val e = Module(new TLEToNoC(edgeIn, edgesOut, wideBundle, (i) => slaveToEgressOffset(i) + 2))
a.io.protocol <> io.tilelink.a
io.tilelink.b <> b.io.protocol
c.io.protocol <> io.tilelink.c
io.tilelink.d <> d.io.protocol
e.io.protocol <> io.tilelink.e
io.flits.a <> a.io.flit
b.io.flit <> io.flits.b
io.flits.c <> c.io.flit
d.io.flit <> io.flits.d
io.flits.e <> e.io.flit
}
class TLMasterACDToNoC(
edgeIn: TLEdge, edgesOut: Seq[TLEdge],
sourceStart: Int, sourceSize: Int,
wideBundle: TLBundleParameters,
slaveToEgressOffset: Int => Int,
flitWidth: Int
)(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val tilelink = Flipped(new TLBundle(wideBundle))
val flits = new Bundle {
val a = Decoupled(new IngressFlit(flitWidth))
val c = Decoupled(new IngressFlit(flitWidth))
val d = Flipped(Decoupled(new EgressFlit(flitWidth)))
}
})
io.tilelink := DontCare
val a = Module(new TLAToNoC(edgeIn, edgesOut, wideBundle, (i) => slaveToEgressOffset(i) + 0, sourceStart))
val c = Module(new TLCToNoC(edgeIn, edgesOut, wideBundle, (i) => slaveToEgressOffset(i) + 1, sourceStart))
val d = Module(new TLDFromNoC(edgeIn, wideBundle, sourceSize))
a.io.protocol <> io.tilelink.a
c.io.protocol <> io.tilelink.c
io.tilelink.d <> d.io.protocol
io.flits.a <> a.io.flit
io.flits.c <> c.io.flit
d.io.flit <> io.flits.d
}
class TLMasterBEToNoC(
edgeIn: TLEdge, edgesOut: Seq[TLEdge],
sourceStart: Int, sourceSize: Int,
wideBundle: TLBundleParameters,
slaveToEgressOffset: Int => Int,
flitWidth: Int
)(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val tilelink = Flipped(new TLBundle(wideBundle))
val flits = new Bundle {
val b = Flipped(Decoupled(new EgressFlit(flitWidth)))
val e = Decoupled(new IngressFlit(flitWidth))
}
})
io.tilelink := DontCare
val b = Module(new TLBFromNoC(edgeIn, wideBundle, sourceSize))
val e = Module(new TLEToNoC(edgeIn, edgesOut, wideBundle, (i) => slaveToEgressOffset(i) + 0))
io.tilelink.b <> b.io.protocol
e.io.protocol <> io.tilelink.e
b.io.flit <> io.flits.b
io.flits.e <> e.io.flit
}
class TLSlaveToNoC(
edgeOut: TLEdge, edgesIn: Seq[TLEdge],
sourceStart: Int, sourceSize: Int,
wideBundle: TLBundleParameters,
masterToEgressOffset: Int => Int,
flitWidth: Int
)(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val tilelink = new TLBundle(wideBundle)
val flits = new Bundle {
val a = Flipped(Decoupled(new EgressFlit(flitWidth)))
val b = Decoupled(new IngressFlit(flitWidth))
val c = Flipped(Decoupled(new EgressFlit(flitWidth)))
val d = Decoupled(new IngressFlit(flitWidth))
val e = Flipped(Decoupled(new EgressFlit(flitWidth)))
}
})
val a = Module(new TLAFromNoC(edgeOut, wideBundle))
val b = Module(new TLBToNoC(edgeOut, edgesIn, wideBundle, (i) => masterToEgressOffset(i) + 0))
val c = Module(new TLCFromNoC(edgeOut, wideBundle))
val d = Module(new TLDToNoC(edgeOut, edgesIn, wideBundle, (i) => masterToEgressOffset(i) + 1, sourceStart))
val e = Module(new TLEFromNoC(edgeOut, wideBundle, sourceSize))
io.tilelink.a <> a.io.protocol
b.io.protocol <> io.tilelink.b
io.tilelink.c <> c.io.protocol
d.io.protocol <> io.tilelink.d
io.tilelink.e <> e.io.protocol
a.io.flit <> io.flits.a
io.flits.b <> b.io.flit
c.io.flit <> io.flits.c
io.flits.d <> d.io.flit
e.io.flit <> io.flits.e
}
class TLSlaveACDToNoC(
edgeOut: TLEdge, edgesIn: Seq[TLEdge],
sourceStart: Int, sourceSize: Int,
wideBundle: TLBundleParameters,
masterToEgressOffset: Int => Int,
flitWidth: Int
)(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val tilelink = new TLBundle(wideBundle)
val flits = new Bundle {
val a = Flipped(Decoupled(new EgressFlit(flitWidth)))
val c = Flipped(Decoupled(new EgressFlit(flitWidth)))
val d = Decoupled(new IngressFlit(flitWidth))
}
})
io.tilelink := DontCare
val a = Module(new TLAFromNoC(edgeOut, wideBundle))
val c = Module(new TLCFromNoC(edgeOut, wideBundle))
val d = Module(new TLDToNoC(edgeOut, edgesIn, wideBundle, (i) => masterToEgressOffset(i) + 0, sourceStart))
io.tilelink.a <> a.io.protocol
io.tilelink.c <> c.io.protocol
d.io.protocol <> io.tilelink.d
a.io.flit <> io.flits.a
c.io.flit <> io.flits.c
io.flits.d <> d.io.flit
}
class TLSlaveBEToNoC(
edgeOut: TLEdge, edgesIn: Seq[TLEdge],
sourceStart: Int, sourceSize: Int,
wideBundle: TLBundleParameters,
masterToEgressOffset: Int => Int,
flitWidth: Int
)(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val tilelink = new TLBundle(wideBundle)
val flits = new Bundle {
val b = Decoupled(new IngressFlit(flitWidth))
val e = Flipped(Decoupled(new EgressFlit(flitWidth)))
}
})
io.tilelink := DontCare
val b = Module(new TLBToNoC(edgeOut, edgesIn, wideBundle, (i) => masterToEgressOffset(i) + 0))
val e = Module(new TLEFromNoC(edgeOut, wideBundle, sourceSize))
b.io.protocol <> io.tilelink.b
io.tilelink.e <> e.io.protocol
io.flits.b <> b.io.flit
e.io.flit <> io.flits.e
}
class TileLinkInterconnectInterface(edgesIn: Seq[TLEdge], edgesOut: Seq[TLEdge])(implicit val p: Parameters) extends Bundle {
val in = MixedVec(edgesIn.map { e => Flipped(new TLBundle(e.bundle)) })
val out = MixedVec(edgesOut.map { e => new TLBundle(e.bundle) })
}
trait TileLinkProtocolParams extends ProtocolParams with TLFieldHelper {
def edgesIn: Seq[TLEdge]
def edgesOut: Seq[TLEdge]
def edgeInNodes: Seq[Int]
def edgeOutNodes: Seq[Int]
require(edgesIn.size == edgeInNodes.size && edgesOut.size == edgeOutNodes.size)
def wideBundle = TLBundleParameters.union(edgesIn.map(_.bundle) ++ edgesOut.map(_.bundle))
def genBundle = new TLBundle(wideBundle)
def inputIdRanges = TLXbar.mapInputIds(edgesIn.map(_.client))
def outputIdRanges = TLXbar.mapOutputIds(edgesOut.map(_.manager))
val vNetBlocking = (blocker: Int, blockee: Int) => blocker < blockee
def genIO()(implicit p: Parameters): Data = new TileLinkInterconnectInterface(edgesIn, edgesOut)
}
object TLConnect {
def apply[T <: TLBundleBase](l: DecoupledIO[T], r: DecoupledIO[T]) = {
l.valid := r.valid
r.ready := l.ready
l.bits.squeezeAll.waiveAll :<>= r.bits.squeezeAll.waiveAll
}
}
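// TLConnect is a partial connect: valid/ready are wired point-to-point and the
// bits are aligned with squeezeAll.waiveAll, so a per-edge (narrow) channel and
// the union'd wideBundle channel can be connected even though their field
// widths and optional fields differ. A typical call, as used in the interface()
// methods below, looks like:
//   TLConnect(nif_master.io.tilelink.a, protocol.in(i).a)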
// BEGIN: TileLinkProtocolParams
case class TileLinkABCDEProtocolParams(
edgesIn: Seq[TLEdge],
edgesOut: Seq[TLEdge],
edgeInNodes: Seq[Int],
edgeOutNodes: Seq[Int]
) extends TileLinkProtocolParams {
// END: TileLinkProtocolParams
val minPayloadWidth = minTLPayloadWidth(new TLBundle(wideBundle))
val ingressNodes = (edgeInNodes.map(u => Seq.fill(3) (u)) ++ edgeOutNodes.map(u => Seq.fill (2) {u})).flatten
val egressNodes = (edgeInNodes.map(u => Seq.fill(2) (u)) ++ edgeOutNodes.map(u => Seq.fill (3) {u})).flatten
val nVirtualNetworks = 5
val flows = edgesIn.zipWithIndex.map { case (edgeIn, ii) => edgesOut.zipWithIndex.map { case (edgeOut, oi) =>
val reachable = edgeIn.client.clients.exists { c => edgeOut.manager.managers.exists { m =>
c.visibility.exists { ca => m.address.exists { ma =>
ca.overlaps(ma)
}}
}}
val probe = edgeIn.client.anySupportProbe && edgeOut.manager.managers.exists(_.regionType >= RegionType.TRACKED)
val release = edgeIn.client.anySupportProbe && edgeOut.manager.anySupportAcquireB
( (if (reachable) Some(FlowParams(ii * 3 + 0 , oi * 3 + 0 + edgesIn.size * 2, 4)) else None) ++ // A
(if (probe ) Some(FlowParams(oi * 2 + 0 + edgesIn.size * 3, ii * 2 + 0 , 3)) else None) ++ // B
(if (release ) Some(FlowParams(ii * 3 + 1 , oi * 3 + 1 + edgesIn.size * 2, 2)) else None) ++ // C
(if (reachable) Some(FlowParams(oi * 2 + 1 + edgesIn.size * 3, ii * 2 + 1 , 1)) else None) ++ // D
(if (release ) Some(FlowParams(ii * 3 + 2 , oi * 3 + 2 + edgesIn.size * 2, 0)) else None)) // E
}}.flatten.flatten
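// Terminal-index sketch for the layout encoded above: with nIn master edges
// and nOut slave edges, master i injects on ingresses 3*i .. 3*i+2 (A, C, E)
// and ejects on egresses 2*i .. 2*i+1 (B, D); slave o injects on ingresses
// 3*nIn + 2*o and 3*nIn + 2*o + 1 (B, D) and ejects on egresses 2*nIn + 3*o
// .. 2*nIn + 3*o + 2 (A, C, E). The final FlowParams field places the five
// channels on virtual networks 4 (A), 3 (B), 2 (C), 1 (D) and 0 (E).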
def interface(terminals: NoCTerminalIO,
ingressOffset: Int, egressOffset: Int, protocol: Data)(implicit p: Parameters) = {
val ingresses = terminals.ingress
val egresses = terminals.egress
protocol match { case protocol: TileLinkInterconnectInterface => {
edgesIn.zipWithIndex.map { case (e,i) =>
val nif_master = Module(new TLMasterToNoC(
e, edgesOut, inputIdRanges(i).start, inputIdRanges(i).size,
wideBundle,
(s) => s * 3 + edgesIn.size * 2 + egressOffset,
minPayloadWidth
))
nif_master.io.tilelink := DontCare
nif_master.io.tilelink.a.valid := false.B
nif_master.io.tilelink.c.valid := false.B
nif_master.io.tilelink.e.valid := false.B
TLConnect(nif_master.io.tilelink.a, protocol.in(i).a)
TLConnect(protocol.in(i).d, nif_master.io.tilelink.d)
if (protocol.in(i).params.hasBCE) {
TLConnect(protocol.in(i).b, nif_master.io.tilelink.b)
TLConnect(nif_master.io.tilelink.c, protocol.in(i).c)
TLConnect(nif_master.io.tilelink.e, protocol.in(i).e)
}
ingresses(i * 3 + 0).flit <> nif_master.io.flits.a
ingresses(i * 3 + 1).flit <> nif_master.io.flits.c
ingresses(i * 3 + 2).flit <> nif_master.io.flits.e
nif_master.io.flits.b <> egresses(i * 2 + 0).flit
nif_master.io.flits.d <> egresses(i * 2 + 1).flit
}
edgesOut.zipWithIndex.map { case (e,i) =>
val nif_slave = Module(new TLSlaveToNoC(
e, edgesIn, outputIdRanges(i).start, outputIdRanges(i).size,
wideBundle,
(s) => s * 2 + egressOffset,
minPayloadWidth
))
nif_slave.io.tilelink := DontCare
nif_slave.io.tilelink.b.valid := false.B
nif_slave.io.tilelink.d.valid := false.B
TLConnect(protocol.out(i).a, nif_slave.io.tilelink.a)
TLConnect(nif_slave.io.tilelink.d, protocol.out(i).d)
if (protocol.out(i).params.hasBCE) {
TLConnect(nif_slave.io.tilelink.b, protocol.out(i).b)
TLConnect(protocol.out(i).c, nif_slave.io.tilelink.c)
TLConnect(protocol.out(i).e, nif_slave.io.tilelink.e)
}
ingresses(i * 2 + 0 + edgesIn.size * 3).flit <> nif_slave.io.flits.b
ingresses(i * 2 + 1 + edgesIn.size * 3).flit <> nif_slave.io.flits.d
nif_slave.io.flits.a <> egresses(i * 3 + 0 + edgesIn.size * 2).flit
nif_slave.io.flits.c <> egresses(i * 3 + 1 + edgesIn.size * 2).flit
nif_slave.io.flits.e <> egresses(i * 3 + 2 + edgesIn.size * 2).flit
}
} }
}
}
case class TileLinkACDProtocolParams(
edgesIn: Seq[TLEdge],
edgesOut: Seq[TLEdge],
edgeInNodes: Seq[Int],
edgeOutNodes: Seq[Int]) extends TileLinkProtocolParams {
val minPayloadWidth = minTLPayloadWidth(Seq(genBundle.a, genBundle.c, genBundle.d).map(_.bits))
val ingressNodes = (edgeInNodes.map(u => Seq.fill(2) (u)) ++ edgeOutNodes.map(u => Seq.fill (1) {u})).flatten
val egressNodes = (edgeInNodes.map(u => Seq.fill(1) (u)) ++ edgeOutNodes.map(u => Seq.fill (2) {u})).flatten
val nVirtualNetworks = 3
val flows = edgesIn.zipWithIndex.map { case (edgeIn, ii) => edgesOut.zipWithIndex.map { case (edgeOut, oi) =>
val reachable = edgeIn.client.clients.exists { c => edgeOut.manager.managers.exists { m =>
c.visibility.exists { ca => m.address.exists { ma =>
ca.overlaps(ma)
}}
}}
val release = edgeIn.client.anySupportProbe && edgeOut.manager.anySupportAcquireB
( (if (reachable) Some(FlowParams(ii * 2 + 0 , oi * 2 + 0 + edgesIn.size * 1, 2)) else None) ++ // A
(if (release ) Some(FlowParams(ii * 2 + 1 , oi * 2 + 1 + edgesIn.size * 1, 1)) else None) ++ // C
(if (reachable) Some(FlowParams(oi * 1 + 0 + edgesIn.size * 2, ii * 1 + 0 , 0)) else None)) // D
}}.flatten.flatten
def interface(terminals: NoCTerminalIO,
ingressOffset: Int, egressOffset: Int, protocol: Data)(implicit p: Parameters) = {
val ingresses = terminals.ingress
val egresses = terminals.egress
protocol match { case protocol: TileLinkInterconnectInterface => {
protocol := DontCare
edgesIn.zipWithIndex.map { case (e,i) =>
val nif_master_acd = Module(new TLMasterACDToNoC(
e, edgesOut, inputIdRanges(i).start, inputIdRanges(i).size,
wideBundle,
(s) => s * 2 + edgesIn.size * 1 + egressOffset,
minPayloadWidth
))
nif_master_acd.io.tilelink := DontCare
nif_master_acd.io.tilelink.a.valid := false.B
nif_master_acd.io.tilelink.c.valid := false.B
nif_master_acd.io.tilelink.e.valid := false.B
TLConnect(nif_master_acd.io.tilelink.a, protocol.in(i).a)
TLConnect(protocol.in(i).d, nif_master_acd.io.tilelink.d)
if (protocol.in(i).params.hasBCE) {
TLConnect(nif_master_acd.io.tilelink.c, protocol.in(i).c)
}
ingresses(i * 2 + 0).flit <> nif_master_acd.io.flits.a
ingresses(i * 2 + 1).flit <> nif_master_acd.io.flits.c
nif_master_acd.io.flits.d <> egresses(i * 1 + 0).flit
}
edgesOut.zipWithIndex.map { case (e,i) =>
val nif_slave_acd = Module(new TLSlaveACDToNoC(
e, edgesIn, outputIdRanges(i).start, outputIdRanges(i).size,
wideBundle,
(s) => s * 1 + egressOffset,
minPayloadWidth
))
nif_slave_acd.io.tilelink := DontCare
nif_slave_acd.io.tilelink.b.valid := false.B
nif_slave_acd.io.tilelink.d.valid := false.B
TLConnect(protocol.out(i).a, nif_slave_acd.io.tilelink.a)
TLConnect(nif_slave_acd.io.tilelink.d, protocol.out(i).d)
if (protocol.out(i).params.hasBCE) {
TLConnect(protocol.out(i).c, nif_slave_acd.io.tilelink.c)
}
ingresses(i * 1 + 0 + edgesIn.size * 2).flit <> nif_slave_acd.io.flits.d
nif_slave_acd.io.flits.a <> egresses(i * 2 + 0 + edgesIn.size * 1).flit
nif_slave_acd.io.flits.c <> egresses(i * 2 + 1 + edgesIn.size * 1).flit
}
}}
}
}
case class TileLinkBEProtocolParams(
edgesIn: Seq[TLEdge],
edgesOut: Seq[TLEdge],
edgeInNodes: Seq[Int],
edgeOutNodes: Seq[Int]) extends TileLinkProtocolParams {
val minPayloadWidth = minTLPayloadWidth(Seq(genBundle.b, genBundle.e).map(_.bits))
val ingressNodes = (edgeInNodes.map(u => Seq.fill(1) (u)) ++ edgeOutNodes.map(u => Seq.fill (1) {u})).flatten
val egressNodes = (edgeInNodes.map(u => Seq.fill(1) (u)) ++ edgeOutNodes.map(u => Seq.fill (1) {u})).flatten
val nVirtualNetworks = 2
val flows = edgesIn.zipWithIndex.map { case (edgeIn, ii) => edgesOut.zipWithIndex.map { case (edgeOut, oi) =>
val probe = edgeIn.client.anySupportProbe && edgeOut.manager.managers.exists(_.regionType >= RegionType.TRACKED)
val release = edgeIn.client.anySupportProbe && edgeOut.manager.anySupportAcquireB
( (if (probe ) Some(FlowParams(oi * 1 + 0 + edgesIn.size * 1, ii * 1 + 0 , 1)) else None) ++ // B
(if (release ) Some(FlowParams(ii * 1 + 0 , oi * 1 + 0 + edgesIn.size * 1, 0)) else None)) // E
}}.flatten.flatten
def interface(terminals: NoCTerminalIO,
ingressOffset: Int, egressOffset: Int, protocol: Data)(implicit p: Parameters) = {
val ingresses = terminals.ingress
val egresses = terminals.egress
protocol match { case protocol: TileLinkInterconnectInterface => {
protocol := DontCare
edgesIn.zipWithIndex.map { case (e,i) =>
val nif_master_be = Module(new TLMasterBEToNoC(
e, edgesOut, inputIdRanges(i).start, inputIdRanges(i).size,
wideBundle,
(s) => s * 1 + edgesIn.size * 1 + egressOffset,
minPayloadWidth
))
nif_master_be.io.tilelink := DontCare
nif_master_be.io.tilelink.a.valid := false.B
nif_master_be.io.tilelink.c.valid := false.B
nif_master_be.io.tilelink.e.valid := false.B
if (protocol.in(i).params.hasBCE) {
TLConnect(protocol.in(i).b, nif_master_be.io.tilelink.b)
TLConnect(nif_master_be.io.tilelink.e, protocol.in(i).e)
}
ingresses(i * 1 + 0).flit <> nif_master_be.io.flits.e
nif_master_be.io.flits.b <> egresses(i * 1 + 0).flit
}
edgesOut.zipWithIndex.map { case (e,i) =>
val nif_slave_be = Module(new TLSlaveBEToNoC(
e, edgesIn, outputIdRanges(i).start, outputIdRanges(i).size,
wideBundle,
(s) => s * 1 + egressOffset,
minPayloadWidth
))
nif_slave_be.io.tilelink := DontCare
nif_slave_be.io.tilelink.b.valid := false.B
nif_slave_be.io.tilelink.d.valid := false.B
if (protocol.out(i).params.hasBCE) {
TLConnect(protocol.out(i).e, nif_slave_be.io.tilelink.e)
TLConnect(nif_slave_be.io.tilelink.b, protocol.out(i).b)
}
ingresses(i * 1 + 0 + edgesIn.size * 1).flit <> nif_slave_be.io.flits.b
nif_slave_be.io.flits.e <> egresses(i * 1 + 0 + edgesIn.size * 1).flit
}
}}
}
}
abstract class TLNoCLike(implicit p: Parameters) extends LazyModule {
val node = new TLNexusNode(
clientFn = { seq =>
seq(0).v1copy(
echoFields = BundleField.union(seq.flatMap(_.echoFields)),
requestFields = BundleField.union(seq.flatMap(_.requestFields)),
responseKeys = seq.flatMap(_.responseKeys).distinct,
minLatency = seq.map(_.minLatency).min,
clients = (TLXbar.mapInputIds(seq) zip seq) flatMap { case (range, port) =>
port.clients map { client => client.v1copy(
sourceId = client.sourceId.shift(range.start)
)}
}
)
},
managerFn = { seq =>
val fifoIdFactory = TLXbar.relabeler()
seq(0).v1copy(
responseFields = BundleField.union(seq.flatMap(_.responseFields)),
requestKeys = seq.flatMap(_.requestKeys).distinct,
minLatency = seq.map(_.minLatency).min,
endSinkId = TLXbar.mapOutputIds(seq).map(_.end).max,
managers = seq.flatMap { port =>
require (port.beatBytes == seq(0).beatBytes,
s"TLNoC (data widths don't match: ${port.managers.map(_.name)} has ${port.beatBytes}B vs ${seq(0).managers.map(_.name)} has ${seq(0).beatBytes}B")
// TileLink NoC does not preserve FIFO-ness, masters to this NoC should instantiate FIFOFixers
port.managers map { manager => manager.v1copy(fifoId = None) }
}
)
}
)
}
abstract class TLNoCModuleImp(outer: LazyModule) extends LazyModuleImp(outer) {
val edgesIn: Seq[TLEdge]
val edgesOut: Seq[TLEdge]
val nodeMapping: DiplomaticNetworkNodeMapping
val nocName: String
lazy val inNames = nodeMapping.genUniqueName(edgesIn.map(_.master.masters.map(_.name)))
lazy val outNames = nodeMapping.genUniqueName(edgesOut.map(_.slave.slaves.map(_.name)))
lazy val edgeInNodes = nodeMapping.getNodesIn(inNames)
lazy val edgeOutNodes = nodeMapping.getNodesOut(outNames)
def printNodeMappings(): Unit = {
println(s"Constellation: TLNoC $nocName inwards mapping:")
for ((n, i) <- inNames zip edgeInNodes) {
val node = i.map(_.toString).getOrElse("X")
println(s" $node <- $n")
}
println(s"Constellation: TLNoC $nocName outwards mapping:")
for ((n, i) <- outNames zip edgeOutNodes) {
val node = i.map(_.toString).getOrElse("X")
println(s" $node <- $n")
}
}
}
trait TLNoCParams
// Instantiates a private TLNoC. Replaces the TLXbar
// BEGIN: TLNoCParams
case class SimpleTLNoCParams(
nodeMappings: DiplomaticNetworkNodeMapping,
nocParams: NoCParams = NoCParams(),
) extends TLNoCParams
class TLNoC(params: SimpleTLNoCParams, name: String = "test", inlineNoC: Boolean = false)(implicit p: Parameters) extends TLNoCLike {
// END: TLNoCParams
override def shouldBeInlined = inlineNoC
lazy val module = new TLNoCModuleImp(this) {
val (io_in, edgesIn) = node.in.unzip
val (io_out, edgesOut) = node.out.unzip
val nodeMapping = params.nodeMappings
val nocName = name
printNodeMappings()
val protocolParams = TileLinkABCDEProtocolParams(
edgesIn = edgesIn,
edgesOut = edgesOut,
edgeInNodes = edgeInNodes.flatten,
edgeOutNodes = edgeOutNodes.flatten
)
val noc = Module(new ProtocolNoC(ProtocolNoCParams(
params.nocParams.copy(hasCtrl = false, nocName=name, inlineNoC = inlineNoC),
Seq(protocolParams),
inlineNoC = inlineNoC
)))
noc.io.protocol(0) match {
case protocol: TileLinkInterconnectInterface => {
(protocol.in zip io_in).foreach { case (l,r) => l <> r }
(io_out zip protocol.out).foreach { case (l,r) => l <> r }
}
}
}
}
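// Hypothetical usage sketch (illustrative names, not from this file): replacing
// a bus TLXbar with a private NoC amounts to instantiating a TLNoC with a node
// mapping for the attached masters and slaves, then using its nexus node where
// the xbar's node was used:
//   val sbusNoC = LazyModule(new TLNoC(SimpleTLNoCParams(
//     DiplomaticNetworkNodeMapping(
//       inNodeMapping  = ListMap("Core" -> 0),
//       outNodeMapping = ListMap("ram"  -> 1)))))
//   sbusNoC.node :=* masterNode
//   slaveNode :*= sbusNoC.node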
case class SplitACDxBETLNoCParams(
nodeMappings: DiplomaticNetworkNodeMapping,
acdNoCParams: NoCParams = NoCParams(),
beNoCParams: NoCParams = NoCParams(),
beDivision: Int = 2
) extends TLNoCParams
class TLSplitACDxBENoC(params: SplitACDxBETLNoCParams, name: String = "test", inlineNoC: Boolean = false)(implicit p: Parameters) extends TLNoCLike {
override def shouldBeInlined = inlineNoC
lazy val module = new TLNoCModuleImp(this) {
val (io_in, edgesIn) = node.in.unzip
val (io_out, edgesOut) = node.out.unzip
val nodeMapping = params.nodeMappings
val nocName = name
printNodeMappings()
val acdProtocolParams = TileLinkACDProtocolParams(
edgesIn = edgesIn,
edgesOut = edgesOut,
edgeInNodes = edgeInNodes.flatten,
edgeOutNodes = edgeOutNodes.flatten
)
val beProtocolParams = TileLinkBEProtocolParams(
edgesIn = edgesIn,
edgesOut = edgesOut,
edgeInNodes = edgeInNodes.flatten,
edgeOutNodes = edgeOutNodes.flatten
)
val acd_noc = Module(new ProtocolNoC(ProtocolNoCParams(
params.acdNoCParams.copy(hasCtrl = false, nocName=s"${name}_acd", inlineNoC = inlineNoC),
Seq(acdProtocolParams),
inlineNoC = inlineNoC
)))
val be_noc = Module(new ProtocolNoC(ProtocolNoCParams(
params.beNoCParams.copy(hasCtrl = false, nocName=s"${name}_be", inlineNoC = inlineNoC),
Seq(beProtocolParams),
widthDivision = params.beDivision,
inlineNoC = inlineNoC
)))
acd_noc.io.protocol(0) match { case protocol: TileLinkInterconnectInterface => {
(protocol.in zip io_in).foreach { case (l,r) =>
l := DontCare
l.a <> r.a
l.c <> r.c
l.d <> r.d
}
(io_out zip protocol.out).foreach { case (l,r) =>
r := DontCare
l.a <> r.a
l.c <> r.c
l.d <> r.d
}
}}
be_noc.io.protocol(0) match { case protocol: TileLinkInterconnectInterface => {
(protocol.in zip io_in).foreach { case (l,r) =>
l := DontCare
l.b <> r.b
l.e <> r.e
}
(io_out zip protocol.out).foreach { case (l,r) =>
r := DontCare
l.b <> r.b
l.e <> r.e
}
}}
}
}
case class GlobalTLNoCParams(
nodeMappings: DiplomaticNetworkNodeMapping
) extends TLNoCParams
// Maps this interconnect onto a global NoC
class TLGlobalNoC(params: GlobalTLNoCParams, name: String = "test")(implicit p: Parameters) extends TLNoCLike {
lazy val module = new TLNoCModuleImp(this) with CanAttachToGlobalNoC {
val (io_in, edgesIn) = node.in.unzip
val (io_out, edgesOut) = node.out.unzip
val nodeMapping = params.nodeMappings
val nocName = name
val protocolParams = TileLinkABCDEProtocolParams(
edgesIn = edgesIn,
edgesOut = edgesOut,
edgeInNodes = edgeInNodes.flatten,
edgeOutNodes = edgeOutNodes.flatten
)
printNodeMappings()
val io_global = IO(Flipped(protocolParams.genIO()))
io_global match {
case protocol: TileLinkInterconnectInterface => {
(protocol.in zip io_in).foreach { case (l,r) => l <> r }
(io_out zip protocol.out).foreach { case (l,r) => l <> r }
}
}
}
}
| module ProtocolNoC_1( // @[Protocol.scala:70:7]
input clock, // @[Protocol.scala:70:7]
input reset, // @[Protocol.scala:70:7]
input io_protocol_0_in_8_b_ready, // @[Protocol.scala:71:14]
output io_protocol_0_in_8_b_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_in_8_b_bits_opcode, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_8_b_bits_param, // @[Protocol.scala:71:14]
output [3:0] io_protocol_0_in_8_b_bits_size, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_8_b_bits_source, // @[Protocol.scala:71:14]
output [31:0] io_protocol_0_in_8_b_bits_address, // @[Protocol.scala:71:14]
output [7:0] io_protocol_0_in_8_b_bits_mask, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_in_8_b_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_in_8_b_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_in_8_e_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_8_e_valid, // @[Protocol.scala:71:14]
input [4:0] io_protocol_0_in_8_e_bits_sink, // @[Protocol.scala:71:14]
input io_protocol_0_in_7_b_ready, // @[Protocol.scala:71:14]
output io_protocol_0_in_7_b_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_in_7_b_bits_opcode, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_7_b_bits_param, // @[Protocol.scala:71:14]
output [3:0] io_protocol_0_in_7_b_bits_size, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_7_b_bits_source, // @[Protocol.scala:71:14]
output [31:0] io_protocol_0_in_7_b_bits_address, // @[Protocol.scala:71:14]
output [7:0] io_protocol_0_in_7_b_bits_mask, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_in_7_b_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_in_7_b_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_in_7_e_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_7_e_valid, // @[Protocol.scala:71:14]
input [4:0] io_protocol_0_in_7_e_bits_sink, // @[Protocol.scala:71:14]
input io_protocol_0_in_6_b_ready, // @[Protocol.scala:71:14]
output io_protocol_0_in_6_b_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_in_6_b_bits_opcode, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_6_b_bits_param, // @[Protocol.scala:71:14]
output [3:0] io_protocol_0_in_6_b_bits_size, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_6_b_bits_source, // @[Protocol.scala:71:14]
output [31:0] io_protocol_0_in_6_b_bits_address, // @[Protocol.scala:71:14]
output [7:0] io_protocol_0_in_6_b_bits_mask, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_in_6_b_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_in_6_b_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_in_6_e_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_6_e_valid, // @[Protocol.scala:71:14]
input [4:0] io_protocol_0_in_6_e_bits_sink, // @[Protocol.scala:71:14]
input io_protocol_0_in_5_b_ready, // @[Protocol.scala:71:14]
output io_protocol_0_in_5_b_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_in_5_b_bits_opcode, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_5_b_bits_param, // @[Protocol.scala:71:14]
output [3:0] io_protocol_0_in_5_b_bits_size, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_5_b_bits_source, // @[Protocol.scala:71:14]
output [31:0] io_protocol_0_in_5_b_bits_address, // @[Protocol.scala:71:14]
output [7:0] io_protocol_0_in_5_b_bits_mask, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_in_5_b_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_in_5_b_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_in_5_e_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_5_e_valid, // @[Protocol.scala:71:14]
input [4:0] io_protocol_0_in_5_e_bits_sink, // @[Protocol.scala:71:14]
input io_protocol_0_in_4_b_ready, // @[Protocol.scala:71:14]
output io_protocol_0_in_4_b_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_in_4_b_bits_opcode, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_4_b_bits_param, // @[Protocol.scala:71:14]
output [3:0] io_protocol_0_in_4_b_bits_size, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_4_b_bits_source, // @[Protocol.scala:71:14]
output [31:0] io_protocol_0_in_4_b_bits_address, // @[Protocol.scala:71:14]
output [7:0] io_protocol_0_in_4_b_bits_mask, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_in_4_b_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_in_4_b_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_in_4_e_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_4_e_valid, // @[Protocol.scala:71:14]
input [4:0] io_protocol_0_in_4_e_bits_sink, // @[Protocol.scala:71:14]
input io_protocol_0_in_3_b_ready, // @[Protocol.scala:71:14]
output io_protocol_0_in_3_b_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_in_3_b_bits_opcode, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_3_b_bits_param, // @[Protocol.scala:71:14]
output [3:0] io_protocol_0_in_3_b_bits_size, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_3_b_bits_source, // @[Protocol.scala:71:14]
output [31:0] io_protocol_0_in_3_b_bits_address, // @[Protocol.scala:71:14]
output [7:0] io_protocol_0_in_3_b_bits_mask, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_in_3_b_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_in_3_b_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_in_3_e_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_3_e_valid, // @[Protocol.scala:71:14]
input [4:0] io_protocol_0_in_3_e_bits_sink, // @[Protocol.scala:71:14]
input io_protocol_0_in_2_b_ready, // @[Protocol.scala:71:14]
output io_protocol_0_in_2_b_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_in_2_b_bits_opcode, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_2_b_bits_param, // @[Protocol.scala:71:14]
output [3:0] io_protocol_0_in_2_b_bits_size, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_2_b_bits_source, // @[Protocol.scala:71:14]
output [31:0] io_protocol_0_in_2_b_bits_address, // @[Protocol.scala:71:14]
output [7:0] io_protocol_0_in_2_b_bits_mask, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_in_2_b_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_in_2_b_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_in_2_e_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_2_e_valid, // @[Protocol.scala:71:14]
input [4:0] io_protocol_0_in_2_e_bits_sink, // @[Protocol.scala:71:14]
input io_protocol_0_in_1_b_ready, // @[Protocol.scala:71:14]
output io_protocol_0_in_1_b_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_in_1_b_bits_opcode, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_1_b_bits_param, // @[Protocol.scala:71:14]
output [3:0] io_protocol_0_in_1_b_bits_size, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_1_b_bits_source, // @[Protocol.scala:71:14]
output [31:0] io_protocol_0_in_1_b_bits_address, // @[Protocol.scala:71:14]
output [7:0] io_protocol_0_in_1_b_bits_mask, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_in_1_b_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_in_1_b_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_in_1_e_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_1_e_valid, // @[Protocol.scala:71:14]
input [4:0] io_protocol_0_in_1_e_bits_sink, // @[Protocol.scala:71:14]
output io_protocol_0_out_4_b_ready, // @[Protocol.scala:71:14]
input io_protocol_0_out_4_b_valid, // @[Protocol.scala:71:14]
input [1:0] io_protocol_0_out_4_b_bits_param, // @[Protocol.scala:71:14]
input [5:0] io_protocol_0_out_4_b_bits_source, // @[Protocol.scala:71:14]
input [31:0] io_protocol_0_out_4_b_bits_address, // @[Protocol.scala:71:14]
output io_protocol_0_out_4_e_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_4_e_bits_sink, // @[Protocol.scala:71:14]
output io_protocol_0_out_3_b_ready, // @[Protocol.scala:71:14]
input io_protocol_0_out_3_b_valid, // @[Protocol.scala:71:14]
input [1:0] io_protocol_0_out_3_b_bits_param, // @[Protocol.scala:71:14]
input [5:0] io_protocol_0_out_3_b_bits_source, // @[Protocol.scala:71:14]
input [31:0] io_protocol_0_out_3_b_bits_address, // @[Protocol.scala:71:14]
output io_protocol_0_out_3_e_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_3_e_bits_sink, // @[Protocol.scala:71:14]
output io_protocol_0_out_2_b_ready, // @[Protocol.scala:71:14]
input io_protocol_0_out_2_b_valid, // @[Protocol.scala:71:14]
input [1:0] io_protocol_0_out_2_b_bits_param, // @[Protocol.scala:71:14]
input [5:0] io_protocol_0_out_2_b_bits_source, // @[Protocol.scala:71:14]
input [31:0] io_protocol_0_out_2_b_bits_address, // @[Protocol.scala:71:14]
output io_protocol_0_out_2_e_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_2_e_bits_sink, // @[Protocol.scala:71:14]
output io_protocol_0_out_1_b_ready, // @[Protocol.scala:71:14]
input io_protocol_0_out_1_b_valid, // @[Protocol.scala:71:14]
input [1:0] io_protocol_0_out_1_b_bits_param, // @[Protocol.scala:71:14]
input [5:0] io_protocol_0_out_1_b_bits_source, // @[Protocol.scala:71:14]
input [31:0] io_protocol_0_out_1_b_bits_address, // @[Protocol.scala:71:14]
output io_protocol_0_out_1_e_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_1_e_bits_sink // @[Protocol.scala:71:14]
);
wire [4:0] _nif_slave_be_4_io_tilelink_e_bits_sink; // @[Tilelink.scala:455:34]
wire _nif_slave_be_4_io_flits_b_valid; // @[Tilelink.scala:455:34]
wire _nif_slave_be_4_io_flits_b_bits_head; // @[Tilelink.scala:455:34]
wire _nif_slave_be_4_io_flits_b_bits_tail; // @[Tilelink.scala:455:34]
wire [72:0] _nif_slave_be_4_io_flits_b_bits_payload; // @[Tilelink.scala:455:34]
wire [3:0] _nif_slave_be_4_io_flits_b_bits_egress_id; // @[Tilelink.scala:455:34]
wire [4:0] _nif_slave_be_3_io_tilelink_e_bits_sink; // @[Tilelink.scala:455:34]
wire _nif_slave_be_3_io_flits_b_valid; // @[Tilelink.scala:455:34]
wire _nif_slave_be_3_io_flits_b_bits_head; // @[Tilelink.scala:455:34]
wire _nif_slave_be_3_io_flits_b_bits_tail; // @[Tilelink.scala:455:34]
wire [72:0] _nif_slave_be_3_io_flits_b_bits_payload; // @[Tilelink.scala:455:34]
wire [3:0] _nif_slave_be_3_io_flits_b_bits_egress_id; // @[Tilelink.scala:455:34]
wire [4:0] _nif_slave_be_2_io_tilelink_e_bits_sink; // @[Tilelink.scala:455:34]
wire _nif_slave_be_2_io_flits_b_valid; // @[Tilelink.scala:455:34]
wire _nif_slave_be_2_io_flits_b_bits_head; // @[Tilelink.scala:455:34]
wire _nif_slave_be_2_io_flits_b_bits_tail; // @[Tilelink.scala:455:34]
wire [72:0] _nif_slave_be_2_io_flits_b_bits_payload; // @[Tilelink.scala:455:34]
wire [3:0] _nif_slave_be_2_io_flits_b_bits_egress_id; // @[Tilelink.scala:455:34]
wire [4:0] _nif_slave_be_1_io_tilelink_e_bits_sink; // @[Tilelink.scala:455:34]
wire _nif_slave_be_1_io_flits_b_valid; // @[Tilelink.scala:455:34]
wire _nif_slave_be_1_io_flits_b_bits_head; // @[Tilelink.scala:455:34]
wire _nif_slave_be_1_io_flits_b_bits_tail; // @[Tilelink.scala:455:34]
wire [72:0] _nif_slave_be_1_io_flits_b_bits_payload; // @[Tilelink.scala:455:34]
wire [3:0] _nif_slave_be_1_io_flits_b_bits_egress_id; // @[Tilelink.scala:455:34]
wire _nif_slave_be_io_flits_b_valid; // @[Tilelink.scala:455:34]
wire _nif_slave_be_io_flits_b_bits_head; // @[Tilelink.scala:455:34]
wire _nif_slave_be_io_flits_b_bits_tail; // @[Tilelink.scala:455:34]
wire [72:0] _nif_slave_be_io_flits_b_bits_payload; // @[Tilelink.scala:455:34]
wire [3:0] _nif_slave_be_io_flits_b_bits_egress_id; // @[Tilelink.scala:455:34]
wire _nif_slave_be_io_flits_e_ready; // @[Tilelink.scala:455:34]
wire [5:0] _nif_master_be_8_io_tilelink_b_bits_source; // @[Tilelink.scala:435:35]
wire _nif_master_be_8_io_flits_b_ready; // @[Tilelink.scala:435:35]
wire _nif_master_be_8_io_flits_e_valid; // @[Tilelink.scala:435:35]
wire _nif_master_be_8_io_flits_e_bits_head; // @[Tilelink.scala:435:35]
wire [72:0] _nif_master_be_8_io_flits_e_bits_payload; // @[Tilelink.scala:435:35]
wire [3:0] _nif_master_be_8_io_flits_e_bits_egress_id; // @[Tilelink.scala:435:35]
wire [5:0] _nif_master_be_7_io_tilelink_b_bits_source; // @[Tilelink.scala:435:35]
wire _nif_master_be_7_io_flits_b_ready; // @[Tilelink.scala:435:35]
wire _nif_master_be_7_io_flits_e_valid; // @[Tilelink.scala:435:35]
wire _nif_master_be_7_io_flits_e_bits_head; // @[Tilelink.scala:435:35]
wire [72:0] _nif_master_be_7_io_flits_e_bits_payload; // @[Tilelink.scala:435:35]
wire [3:0] _nif_master_be_7_io_flits_e_bits_egress_id; // @[Tilelink.scala:435:35]
wire [5:0] _nif_master_be_6_io_tilelink_b_bits_source; // @[Tilelink.scala:435:35]
wire _nif_master_be_6_io_flits_b_ready; // @[Tilelink.scala:435:35]
wire _nif_master_be_6_io_flits_e_valid; // @[Tilelink.scala:435:35]
wire _nif_master_be_6_io_flits_e_bits_head; // @[Tilelink.scala:435:35]
wire [72:0] _nif_master_be_6_io_flits_e_bits_payload; // @[Tilelink.scala:435:35]
wire [3:0] _nif_master_be_6_io_flits_e_bits_egress_id; // @[Tilelink.scala:435:35]
wire [5:0] _nif_master_be_5_io_tilelink_b_bits_source; // @[Tilelink.scala:435:35]
wire _nif_master_be_5_io_flits_b_ready; // @[Tilelink.scala:435:35]
wire _nif_master_be_5_io_flits_e_valid; // @[Tilelink.scala:435:35]
wire _nif_master_be_5_io_flits_e_bits_head; // @[Tilelink.scala:435:35]
wire [72:0] _nif_master_be_5_io_flits_e_bits_payload; // @[Tilelink.scala:435:35]
wire [3:0] _nif_master_be_5_io_flits_e_bits_egress_id; // @[Tilelink.scala:435:35]
wire [5:0] _nif_master_be_4_io_tilelink_b_bits_source; // @[Tilelink.scala:435:35]
wire _nif_master_be_4_io_flits_b_ready; // @[Tilelink.scala:435:35]
wire _nif_master_be_4_io_flits_e_valid; // @[Tilelink.scala:435:35]
wire _nif_master_be_4_io_flits_e_bits_head; // @[Tilelink.scala:435:35]
wire [72:0] _nif_master_be_4_io_flits_e_bits_payload; // @[Tilelink.scala:435:35]
wire [3:0] _nif_master_be_4_io_flits_e_bits_egress_id; // @[Tilelink.scala:435:35]
wire [5:0] _nif_master_be_3_io_tilelink_b_bits_source; // @[Tilelink.scala:435:35]
wire _nif_master_be_3_io_flits_b_ready; // @[Tilelink.scala:435:35]
wire _nif_master_be_3_io_flits_e_valid; // @[Tilelink.scala:435:35]
wire _nif_master_be_3_io_flits_e_bits_head; // @[Tilelink.scala:435:35]
wire [72:0] _nif_master_be_3_io_flits_e_bits_payload; // @[Tilelink.scala:435:35]
wire [3:0] _nif_master_be_3_io_flits_e_bits_egress_id; // @[Tilelink.scala:435:35]
wire [5:0] _nif_master_be_2_io_tilelink_b_bits_source; // @[Tilelink.scala:435:35]
wire _nif_master_be_2_io_flits_b_ready; // @[Tilelink.scala:435:35]
wire _nif_master_be_2_io_flits_e_valid; // @[Tilelink.scala:435:35]
wire _nif_master_be_2_io_flits_e_bits_head; // @[Tilelink.scala:435:35]
wire [72:0] _nif_master_be_2_io_flits_e_bits_payload; // @[Tilelink.scala:435:35]
wire [3:0] _nif_master_be_2_io_flits_e_bits_egress_id; // @[Tilelink.scala:435:35]
wire [5:0] _nif_master_be_1_io_tilelink_b_bits_source; // @[Tilelink.scala:435:35]
wire _nif_master_be_1_io_flits_b_ready; // @[Tilelink.scala:435:35]
wire _nif_master_be_1_io_flits_e_valid; // @[Tilelink.scala:435:35]
wire _nif_master_be_1_io_flits_e_bits_head; // @[Tilelink.scala:435:35]
wire [72:0] _nif_master_be_1_io_flits_e_bits_payload; // @[Tilelink.scala:435:35]
wire [3:0] _nif_master_be_1_io_flits_e_bits_egress_id; // @[Tilelink.scala:435:35]
wire _nif_master_be_io_flits_b_ready; // @[Tilelink.scala:435:35]
wire _nif_master_be_io_flits_e_valid; // @[Tilelink.scala:435:35]
wire _nif_master_be_io_flits_e_bits_head; // @[Tilelink.scala:435:35]
wire [72:0] _nif_master_be_io_flits_e_bits_payload; // @[Tilelink.scala:435:35]
wire [3:0] _nif_master_be_io_flits_e_bits_egress_id; // @[Tilelink.scala:435:35]
wire _noc_io_ingress_13_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_12_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_11_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_10_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_9_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_8_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_7_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_6_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_5_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_4_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_3_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_2_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_1_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_0_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_egress_13_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_13_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_13_flit_bits_tail; // @[Protocol.scala:116:19]
wire [73:0] _noc_io_egress_13_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_12_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_12_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_12_flit_bits_tail; // @[Protocol.scala:116:19]
wire [73:0] _noc_io_egress_12_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_11_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_11_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_11_flit_bits_tail; // @[Protocol.scala:116:19]
wire [73:0] _noc_io_egress_11_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_10_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_10_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_10_flit_bits_tail; // @[Protocol.scala:116:19]
wire [73:0] _noc_io_egress_10_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_9_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_9_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_9_flit_bits_tail; // @[Protocol.scala:116:19]
wire _noc_io_egress_8_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_8_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_8_flit_bits_tail; // @[Protocol.scala:116:19]
wire [73:0] _noc_io_egress_8_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_7_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_7_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_7_flit_bits_tail; // @[Protocol.scala:116:19]
wire [73:0] _noc_io_egress_7_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_6_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_6_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_6_flit_bits_tail; // @[Protocol.scala:116:19]
wire [73:0] _noc_io_egress_6_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_5_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_5_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_5_flit_bits_tail; // @[Protocol.scala:116:19]
wire [73:0] _noc_io_egress_5_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_4_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_4_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_4_flit_bits_tail; // @[Protocol.scala:116:19]
wire [73:0] _noc_io_egress_4_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_3_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_3_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_3_flit_bits_tail; // @[Protocol.scala:116:19]
wire [73:0] _noc_io_egress_3_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_2_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_2_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_2_flit_bits_tail; // @[Protocol.scala:116:19]
wire [73:0] _noc_io_egress_2_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_1_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_1_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_1_flit_bits_tail; // @[Protocol.scala:116:19]
wire [73:0] _noc_io_egress_1_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_0_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_0_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_0_flit_bits_tail; // @[Protocol.scala:116:19]
NoC_1 noc ( // @[Protocol.scala:116:19]
.clock (clock),
.reset (reset),
.io_ingress_13_flit_ready (_noc_io_ingress_13_flit_ready),
.io_ingress_13_flit_valid (_nif_slave_be_4_io_flits_b_valid), // @[Tilelink.scala:455:34]
.io_ingress_13_flit_bits_head (_nif_slave_be_4_io_flits_b_bits_head), // @[Tilelink.scala:455:34]
.io_ingress_13_flit_bits_tail (_nif_slave_be_4_io_flits_b_bits_tail), // @[Tilelink.scala:455:34]
.io_ingress_13_flit_bits_payload ({1'h0, _nif_slave_be_4_io_flits_b_bits_payload}), // @[Tilelink.scala:451:35, :455:34, :470:54]
.io_ingress_13_flit_bits_egress_id (_nif_slave_be_4_io_flits_b_bits_egress_id), // @[Tilelink.scala:455:34]
.io_ingress_12_flit_ready (_noc_io_ingress_12_flit_ready),
.io_ingress_12_flit_valid (_nif_slave_be_3_io_flits_b_valid), // @[Tilelink.scala:455:34]
.io_ingress_12_flit_bits_head (_nif_slave_be_3_io_flits_b_bits_head), // @[Tilelink.scala:455:34]
.io_ingress_12_flit_bits_tail (_nif_slave_be_3_io_flits_b_bits_tail), // @[Tilelink.scala:455:34]
.io_ingress_12_flit_bits_payload ({1'h0, _nif_slave_be_3_io_flits_b_bits_payload}), // @[Tilelink.scala:451:35, :455:34, :470:54]
.io_ingress_12_flit_bits_egress_id (_nif_slave_be_3_io_flits_b_bits_egress_id), // @[Tilelink.scala:455:34]
.io_ingress_11_flit_ready (_noc_io_ingress_11_flit_ready),
.io_ingress_11_flit_valid (_nif_slave_be_2_io_flits_b_valid), // @[Tilelink.scala:455:34]
.io_ingress_11_flit_bits_head (_nif_slave_be_2_io_flits_b_bits_head), // @[Tilelink.scala:455:34]
.io_ingress_11_flit_bits_tail (_nif_slave_be_2_io_flits_b_bits_tail), // @[Tilelink.scala:455:34]
.io_ingress_11_flit_bits_payload ({1'h0, _nif_slave_be_2_io_flits_b_bits_payload}), // @[Tilelink.scala:451:35, :455:34, :470:54]
.io_ingress_11_flit_bits_egress_id (_nif_slave_be_2_io_flits_b_bits_egress_id), // @[Tilelink.scala:455:34]
.io_ingress_10_flit_ready (_noc_io_ingress_10_flit_ready),
.io_ingress_10_flit_valid (_nif_slave_be_1_io_flits_b_valid), // @[Tilelink.scala:455:34]
.io_ingress_10_flit_bits_head (_nif_slave_be_1_io_flits_b_bits_head), // @[Tilelink.scala:455:34]
.io_ingress_10_flit_bits_tail (_nif_slave_be_1_io_flits_b_bits_tail), // @[Tilelink.scala:455:34]
.io_ingress_10_flit_bits_payload ({1'h0, _nif_slave_be_1_io_flits_b_bits_payload}), // @[Tilelink.scala:451:35, :455:34, :470:54]
.io_ingress_10_flit_bits_egress_id (_nif_slave_be_1_io_flits_b_bits_egress_id), // @[Tilelink.scala:455:34]
.io_ingress_9_flit_ready (_noc_io_ingress_9_flit_ready),
.io_ingress_9_flit_valid (_nif_slave_be_io_flits_b_valid), // @[Tilelink.scala:455:34]
.io_ingress_9_flit_bits_head (_nif_slave_be_io_flits_b_bits_head), // @[Tilelink.scala:455:34]
.io_ingress_9_flit_bits_tail (_nif_slave_be_io_flits_b_bits_tail), // @[Tilelink.scala:455:34]
.io_ingress_9_flit_bits_payload ({1'h0, _nif_slave_be_io_flits_b_bits_payload}), // @[Tilelink.scala:451:35, :455:34, :470:54]
.io_ingress_9_flit_bits_egress_id (_nif_slave_be_io_flits_b_bits_egress_id), // @[Tilelink.scala:455:34]
.io_ingress_8_flit_ready (_noc_io_ingress_8_flit_ready),
.io_ingress_8_flit_valid (_nif_master_be_8_io_flits_e_valid), // @[Tilelink.scala:435:35]
.io_ingress_8_flit_bits_head (_nif_master_be_8_io_flits_e_bits_head), // @[Tilelink.scala:435:35]
.io_ingress_8_flit_bits_payload ({1'h0, _nif_master_be_8_io_flits_e_bits_payload}), // @[Tilelink.scala:435:35, :451:35]
.io_ingress_8_flit_bits_egress_id (_nif_master_be_8_io_flits_e_bits_egress_id), // @[Tilelink.scala:435:35]
.io_ingress_7_flit_ready (_noc_io_ingress_7_flit_ready),
.io_ingress_7_flit_valid (_nif_master_be_7_io_flits_e_valid), // @[Tilelink.scala:435:35]
.io_ingress_7_flit_bits_head (_nif_master_be_7_io_flits_e_bits_head), // @[Tilelink.scala:435:35]
.io_ingress_7_flit_bits_payload ({1'h0, _nif_master_be_7_io_flits_e_bits_payload}), // @[Tilelink.scala:435:35, :451:35]
.io_ingress_7_flit_bits_egress_id (_nif_master_be_7_io_flits_e_bits_egress_id), // @[Tilelink.scala:435:35]
.io_ingress_6_flit_ready (_noc_io_ingress_6_flit_ready),
.io_ingress_6_flit_valid (_nif_master_be_6_io_flits_e_valid), // @[Tilelink.scala:435:35]
.io_ingress_6_flit_bits_head (_nif_master_be_6_io_flits_e_bits_head), // @[Tilelink.scala:435:35]
.io_ingress_6_flit_bits_payload ({1'h0, _nif_master_be_6_io_flits_e_bits_payload}), // @[Tilelink.scala:435:35, :451:35]
.io_ingress_6_flit_bits_egress_id (_nif_master_be_6_io_flits_e_bits_egress_id), // @[Tilelink.scala:435:35]
.io_ingress_5_flit_ready (_noc_io_ingress_5_flit_ready),
.io_ingress_5_flit_valid (_nif_master_be_5_io_flits_e_valid), // @[Tilelink.scala:435:35]
.io_ingress_5_flit_bits_head (_nif_master_be_5_io_flits_e_bits_head), // @[Tilelink.scala:435:35]
.io_ingress_5_flit_bits_payload ({1'h0, _nif_master_be_5_io_flits_e_bits_payload}), // @[Tilelink.scala:435:35, :451:35]
.io_ingress_5_flit_bits_egress_id (_nif_master_be_5_io_flits_e_bits_egress_id), // @[Tilelink.scala:435:35]
.io_ingress_4_flit_ready (_noc_io_ingress_4_flit_ready),
.io_ingress_4_flit_valid (_nif_master_be_4_io_flits_e_valid), // @[Tilelink.scala:435:35]
.io_ingress_4_flit_bits_head (_nif_master_be_4_io_flits_e_bits_head), // @[Tilelink.scala:435:35]
.io_ingress_4_flit_bits_payload ({1'h0, _nif_master_be_4_io_flits_e_bits_payload}), // @[Tilelink.scala:435:35, :451:35]
.io_ingress_4_flit_bits_egress_id (_nif_master_be_4_io_flits_e_bits_egress_id), // @[Tilelink.scala:435:35]
.io_ingress_3_flit_ready (_noc_io_ingress_3_flit_ready),
.io_ingress_3_flit_valid (_nif_master_be_3_io_flits_e_valid), // @[Tilelink.scala:435:35]
.io_ingress_3_flit_bits_head (_nif_master_be_3_io_flits_e_bits_head), // @[Tilelink.scala:435:35]
.io_ingress_3_flit_bits_payload ({1'h0, _nif_master_be_3_io_flits_e_bits_payload}), // @[Tilelink.scala:435:35, :451:35]
.io_ingress_3_flit_bits_egress_id (_nif_master_be_3_io_flits_e_bits_egress_id), // @[Tilelink.scala:435:35]
.io_ingress_2_flit_ready (_noc_io_ingress_2_flit_ready),
.io_ingress_2_flit_valid (_nif_master_be_2_io_flits_e_valid), // @[Tilelink.scala:435:35]
.io_ingress_2_flit_bits_head (_nif_master_be_2_io_flits_e_bits_head), // @[Tilelink.scala:435:35]
.io_ingress_2_flit_bits_payload ({1'h0, _nif_master_be_2_io_flits_e_bits_payload}), // @[Tilelink.scala:435:35, :451:35]
.io_ingress_2_flit_bits_egress_id (_nif_master_be_2_io_flits_e_bits_egress_id), // @[Tilelink.scala:435:35]
.io_ingress_1_flit_ready (_noc_io_ingress_1_flit_ready),
.io_ingress_1_flit_valid (_nif_master_be_1_io_flits_e_valid), // @[Tilelink.scala:435:35]
.io_ingress_1_flit_bits_head (_nif_master_be_1_io_flits_e_bits_head), // @[Tilelink.scala:435:35]
.io_ingress_1_flit_bits_payload ({1'h0, _nif_master_be_1_io_flits_e_bits_payload}), // @[Tilelink.scala:435:35, :451:35]
.io_ingress_1_flit_bits_egress_id (_nif_master_be_1_io_flits_e_bits_egress_id), // @[Tilelink.scala:435:35]
.io_ingress_0_flit_ready (_noc_io_ingress_0_flit_ready),
.io_ingress_0_flit_valid (_nif_master_be_io_flits_e_valid), // @[Tilelink.scala:435:35]
.io_ingress_0_flit_bits_head (_nif_master_be_io_flits_e_bits_head), // @[Tilelink.scala:435:35]
.io_ingress_0_flit_bits_payload ({1'h0, _nif_master_be_io_flits_e_bits_payload}), // @[Tilelink.scala:435:35, :451:35]
.io_ingress_0_flit_bits_egress_id (_nif_master_be_io_flits_e_bits_egress_id), // @[Tilelink.scala:435:35]
.io_egress_13_flit_valid (_noc_io_egress_13_flit_valid),
.io_egress_13_flit_bits_head (_noc_io_egress_13_flit_bits_head),
.io_egress_13_flit_bits_tail (_noc_io_egress_13_flit_bits_tail),
.io_egress_13_flit_bits_payload (_noc_io_egress_13_flit_bits_payload),
.io_egress_12_flit_valid (_noc_io_egress_12_flit_valid),
.io_egress_12_flit_bits_head (_noc_io_egress_12_flit_bits_head),
.io_egress_12_flit_bits_tail (_noc_io_egress_12_flit_bits_tail),
.io_egress_12_flit_bits_payload (_noc_io_egress_12_flit_bits_payload),
.io_egress_11_flit_valid (_noc_io_egress_11_flit_valid),
.io_egress_11_flit_bits_head (_noc_io_egress_11_flit_bits_head),
.io_egress_11_flit_bits_tail (_noc_io_egress_11_flit_bits_tail),
.io_egress_11_flit_bits_payload (_noc_io_egress_11_flit_bits_payload),
.io_egress_10_flit_valid (_noc_io_egress_10_flit_valid),
.io_egress_10_flit_bits_head (_noc_io_egress_10_flit_bits_head),
.io_egress_10_flit_bits_tail (_noc_io_egress_10_flit_bits_tail),
.io_egress_10_flit_bits_payload (_noc_io_egress_10_flit_bits_payload),
.io_egress_9_flit_ready (_nif_slave_be_io_flits_e_ready), // @[Tilelink.scala:455:34]
.io_egress_9_flit_valid (_noc_io_egress_9_flit_valid),
.io_egress_9_flit_bits_head (_noc_io_egress_9_flit_bits_head),
.io_egress_9_flit_bits_tail (_noc_io_egress_9_flit_bits_tail),
.io_egress_8_flit_ready (_nif_master_be_8_io_flits_b_ready), // @[Tilelink.scala:435:35]
.io_egress_8_flit_valid (_noc_io_egress_8_flit_valid),
.io_egress_8_flit_bits_head (_noc_io_egress_8_flit_bits_head),
.io_egress_8_flit_bits_tail (_noc_io_egress_8_flit_bits_tail),
.io_egress_8_flit_bits_payload (_noc_io_egress_8_flit_bits_payload),
.io_egress_7_flit_ready (_nif_master_be_7_io_flits_b_ready), // @[Tilelink.scala:435:35]
.io_egress_7_flit_valid (_noc_io_egress_7_flit_valid),
.io_egress_7_flit_bits_head (_noc_io_egress_7_flit_bits_head),
.io_egress_7_flit_bits_tail (_noc_io_egress_7_flit_bits_tail),
.io_egress_7_flit_bits_payload (_noc_io_egress_7_flit_bits_payload),
.io_egress_6_flit_ready (_nif_master_be_6_io_flits_b_ready), // @[Tilelink.scala:435:35]
.io_egress_6_flit_valid (_noc_io_egress_6_flit_valid),
.io_egress_6_flit_bits_head (_noc_io_egress_6_flit_bits_head),
.io_egress_6_flit_bits_tail (_noc_io_egress_6_flit_bits_tail),
.io_egress_6_flit_bits_payload (_noc_io_egress_6_flit_bits_payload),
.io_egress_5_flit_ready (_nif_master_be_5_io_flits_b_ready), // @[Tilelink.scala:435:35]
.io_egress_5_flit_valid (_noc_io_egress_5_flit_valid),
.io_egress_5_flit_bits_head (_noc_io_egress_5_flit_bits_head),
.io_egress_5_flit_bits_tail (_noc_io_egress_5_flit_bits_tail),
.io_egress_5_flit_bits_payload (_noc_io_egress_5_flit_bits_payload),
.io_egress_4_flit_ready (_nif_master_be_4_io_flits_b_ready), // @[Tilelink.scala:435:35]
.io_egress_4_flit_valid (_noc_io_egress_4_flit_valid),
.io_egress_4_flit_bits_head (_noc_io_egress_4_flit_bits_head),
.io_egress_4_flit_bits_tail (_noc_io_egress_4_flit_bits_tail),
.io_egress_4_flit_bits_payload (_noc_io_egress_4_flit_bits_payload),
.io_egress_3_flit_ready (_nif_master_be_3_io_flits_b_ready), // @[Tilelink.scala:435:35]
.io_egress_3_flit_valid (_noc_io_egress_3_flit_valid),
.io_egress_3_flit_bits_head (_noc_io_egress_3_flit_bits_head),
.io_egress_3_flit_bits_tail (_noc_io_egress_3_flit_bits_tail),
.io_egress_3_flit_bits_payload (_noc_io_egress_3_flit_bits_payload),
.io_egress_2_flit_ready (_nif_master_be_2_io_flits_b_ready), // @[Tilelink.scala:435:35]
.io_egress_2_flit_valid (_noc_io_egress_2_flit_valid),
.io_egress_2_flit_bits_head (_noc_io_egress_2_flit_bits_head),
.io_egress_2_flit_bits_tail (_noc_io_egress_2_flit_bits_tail),
.io_egress_2_flit_bits_payload (_noc_io_egress_2_flit_bits_payload),
.io_egress_1_flit_ready (_nif_master_be_1_io_flits_b_ready), // @[Tilelink.scala:435:35]
.io_egress_1_flit_valid (_noc_io_egress_1_flit_valid),
.io_egress_1_flit_bits_head (_noc_io_egress_1_flit_bits_head),
.io_egress_1_flit_bits_tail (_noc_io_egress_1_flit_bits_tail),
.io_egress_1_flit_bits_payload (_noc_io_egress_1_flit_bits_payload),
.io_egress_0_flit_ready (_nif_master_be_io_flits_b_ready), // @[Tilelink.scala:435:35]
.io_egress_0_flit_valid (_noc_io_egress_0_flit_valid),
.io_egress_0_flit_bits_head (_noc_io_egress_0_flit_bits_head),
.io_egress_0_flit_bits_tail (_noc_io_egress_0_flit_bits_tail),
.io_router_clocks_0_clock (clock),
.io_router_clocks_0_reset (reset),
.io_router_clocks_1_clock (clock),
.io_router_clocks_1_reset (reset),
.io_router_clocks_2_clock (clock),
.io_router_clocks_2_reset (reset),
.io_router_clocks_3_clock (clock),
.io_router_clocks_3_reset (reset),
.io_router_clocks_4_clock (clock),
.io_router_clocks_4_reset (reset),
.io_router_clocks_5_clock (clock),
.io_router_clocks_5_reset (reset),
.io_router_clocks_6_clock (clock),
.io_router_clocks_6_reset (reset),
.io_router_clocks_7_clock (clock),
.io_router_clocks_7_reset (reset),
.io_router_clocks_8_clock (clock),
.io_router_clocks_8_reset (reset),
.io_router_clocks_9_clock (clock),
.io_router_clocks_9_reset (reset),
.io_router_clocks_10_clock (clock),
.io_router_clocks_10_reset (reset),
.io_router_clocks_11_clock (clock),
.io_router_clocks_11_reset (reset),
.io_router_clocks_12_clock (clock),
.io_router_clocks_12_reset (reset)
); // @[Protocol.scala:116:19]
TLMasterBEToNoC nif_master_be ( // @[Tilelink.scala:435:35]
.clock (clock),
.reset (reset),
.io_flits_b_ready (_nif_master_be_io_flits_b_ready),
.io_flits_b_valid (_noc_io_egress_0_flit_valid), // @[Protocol.scala:116:19]
.io_flits_b_bits_head (_noc_io_egress_0_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_b_bits_tail (_noc_io_egress_0_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_e_ready (_noc_io_ingress_0_flit_ready), // @[Protocol.scala:116:19]
.io_flits_e_valid (_nif_master_be_io_flits_e_valid),
.io_flits_e_bits_head (_nif_master_be_io_flits_e_bits_head),
.io_flits_e_bits_payload (_nif_master_be_io_flits_e_bits_payload),
.io_flits_e_bits_egress_id (_nif_master_be_io_flits_e_bits_egress_id)
); // @[Tilelink.scala:435:35]
TLMasterBEToNoC_1 nif_master_be_1 ( // @[Tilelink.scala:435:35]
.clock (clock),
.reset (reset),
.io_tilelink_b_ready (io_protocol_0_in_1_b_ready),
.io_tilelink_b_valid (io_protocol_0_in_1_b_valid),
.io_tilelink_b_bits_opcode (io_protocol_0_in_1_b_bits_opcode),
.io_tilelink_b_bits_param (io_protocol_0_in_1_b_bits_param),
.io_tilelink_b_bits_size (io_protocol_0_in_1_b_bits_size),
.io_tilelink_b_bits_source (_nif_master_be_1_io_tilelink_b_bits_source),
.io_tilelink_b_bits_address (io_protocol_0_in_1_b_bits_address),
.io_tilelink_b_bits_mask (io_protocol_0_in_1_b_bits_mask),
.io_tilelink_b_bits_data (io_protocol_0_in_1_b_bits_data),
.io_tilelink_b_bits_corrupt (io_protocol_0_in_1_b_bits_corrupt),
.io_tilelink_e_ready (io_protocol_0_in_1_e_ready),
.io_tilelink_e_valid (io_protocol_0_in_1_e_valid),
.io_tilelink_e_bits_sink (io_protocol_0_in_1_e_bits_sink),
.io_flits_b_ready (_nif_master_be_1_io_flits_b_ready),
.io_flits_b_valid (_noc_io_egress_1_flit_valid), // @[Protocol.scala:116:19]
.io_flits_b_bits_head (_noc_io_egress_1_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_b_bits_tail (_noc_io_egress_1_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_b_bits_payload (_noc_io_egress_1_flit_bits_payload[72:0]), // @[Tilelink.scala:452:34]
.io_flits_e_ready (_noc_io_ingress_1_flit_ready), // @[Protocol.scala:116:19]
.io_flits_e_valid (_nif_master_be_1_io_flits_e_valid),
.io_flits_e_bits_head (_nif_master_be_1_io_flits_e_bits_head),
.io_flits_e_bits_payload (_nif_master_be_1_io_flits_e_bits_payload),
.io_flits_e_bits_egress_id (_nif_master_be_1_io_flits_e_bits_egress_id)
); // @[Tilelink.scala:435:35]
TLMasterBEToNoC_1 nif_master_be_2 ( // @[Tilelink.scala:435:35]
.clock (clock),
.reset (reset),
.io_tilelink_b_ready (io_protocol_0_in_2_b_ready),
.io_tilelink_b_valid (io_protocol_0_in_2_b_valid),
.io_tilelink_b_bits_opcode (io_protocol_0_in_2_b_bits_opcode),
.io_tilelink_b_bits_param (io_protocol_0_in_2_b_bits_param),
.io_tilelink_b_bits_size (io_protocol_0_in_2_b_bits_size),
.io_tilelink_b_bits_source (_nif_master_be_2_io_tilelink_b_bits_source),
.io_tilelink_b_bits_address (io_protocol_0_in_2_b_bits_address),
.io_tilelink_b_bits_mask (io_protocol_0_in_2_b_bits_mask),
.io_tilelink_b_bits_data (io_protocol_0_in_2_b_bits_data),
.io_tilelink_b_bits_corrupt (io_protocol_0_in_2_b_bits_corrupt),
.io_tilelink_e_ready (io_protocol_0_in_2_e_ready),
.io_tilelink_e_valid (io_protocol_0_in_2_e_valid),
.io_tilelink_e_bits_sink (io_protocol_0_in_2_e_bits_sink),
.io_flits_b_ready (_nif_master_be_2_io_flits_b_ready),
.io_flits_b_valid (_noc_io_egress_2_flit_valid), // @[Protocol.scala:116:19]
.io_flits_b_bits_head (_noc_io_egress_2_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_b_bits_tail (_noc_io_egress_2_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_b_bits_payload (_noc_io_egress_2_flit_bits_payload[72:0]), // @[Tilelink.scala:452:34]
.io_flits_e_ready (_noc_io_ingress_2_flit_ready), // @[Protocol.scala:116:19]
.io_flits_e_valid (_nif_master_be_2_io_flits_e_valid),
.io_flits_e_bits_head (_nif_master_be_2_io_flits_e_bits_head),
.io_flits_e_bits_payload (_nif_master_be_2_io_flits_e_bits_payload),
.io_flits_e_bits_egress_id (_nif_master_be_2_io_flits_e_bits_egress_id)
); // @[Tilelink.scala:435:35]
TLMasterBEToNoC_1 nif_master_be_3 ( // @[Tilelink.scala:435:35]
.clock (clock),
.reset (reset),
.io_tilelink_b_ready (io_protocol_0_in_3_b_ready),
.io_tilelink_b_valid (io_protocol_0_in_3_b_valid),
.io_tilelink_b_bits_opcode (io_protocol_0_in_3_b_bits_opcode),
.io_tilelink_b_bits_param (io_protocol_0_in_3_b_bits_param),
.io_tilelink_b_bits_size (io_protocol_0_in_3_b_bits_size),
.io_tilelink_b_bits_source (_nif_master_be_3_io_tilelink_b_bits_source),
.io_tilelink_b_bits_address (io_protocol_0_in_3_b_bits_address),
.io_tilelink_b_bits_mask (io_protocol_0_in_3_b_bits_mask),
.io_tilelink_b_bits_data (io_protocol_0_in_3_b_bits_data),
.io_tilelink_b_bits_corrupt (io_protocol_0_in_3_b_bits_corrupt),
.io_tilelink_e_ready (io_protocol_0_in_3_e_ready),
.io_tilelink_e_valid (io_protocol_0_in_3_e_valid),
.io_tilelink_e_bits_sink (io_protocol_0_in_3_e_bits_sink),
.io_flits_b_ready (_nif_master_be_3_io_flits_b_ready),
.io_flits_b_valid (_noc_io_egress_3_flit_valid), // @[Protocol.scala:116:19]
.io_flits_b_bits_head (_noc_io_egress_3_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_b_bits_tail (_noc_io_egress_3_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_b_bits_payload (_noc_io_egress_3_flit_bits_payload[72:0]), // @[Tilelink.scala:452:34]
.io_flits_e_ready (_noc_io_ingress_3_flit_ready), // @[Protocol.scala:116:19]
.io_flits_e_valid (_nif_master_be_3_io_flits_e_valid),
.io_flits_e_bits_head (_nif_master_be_3_io_flits_e_bits_head),
.io_flits_e_bits_payload (_nif_master_be_3_io_flits_e_bits_payload),
.io_flits_e_bits_egress_id (_nif_master_be_3_io_flits_e_bits_egress_id)
); // @[Tilelink.scala:435:35]
TLMasterBEToNoC_1 nif_master_be_4 ( // @[Tilelink.scala:435:35]
.clock (clock),
.reset (reset),
.io_tilelink_b_ready (io_protocol_0_in_4_b_ready),
.io_tilelink_b_valid (io_protocol_0_in_4_b_valid),
.io_tilelink_b_bits_opcode (io_protocol_0_in_4_b_bits_opcode),
.io_tilelink_b_bits_param (io_protocol_0_in_4_b_bits_param),
.io_tilelink_b_bits_size (io_protocol_0_in_4_b_bits_size),
.io_tilelink_b_bits_source (_nif_master_be_4_io_tilelink_b_bits_source),
.io_tilelink_b_bits_address (io_protocol_0_in_4_b_bits_address),
.io_tilelink_b_bits_mask (io_protocol_0_in_4_b_bits_mask),
.io_tilelink_b_bits_data (io_protocol_0_in_4_b_bits_data),
.io_tilelink_b_bits_corrupt (io_protocol_0_in_4_b_bits_corrupt),
.io_tilelink_e_ready (io_protocol_0_in_4_e_ready),
.io_tilelink_e_valid (io_protocol_0_in_4_e_valid),
.io_tilelink_e_bits_sink (io_protocol_0_in_4_e_bits_sink),
.io_flits_b_ready (_nif_master_be_4_io_flits_b_ready),
.io_flits_b_valid (_noc_io_egress_4_flit_valid), // @[Protocol.scala:116:19]
.io_flits_b_bits_head (_noc_io_egress_4_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_b_bits_tail (_noc_io_egress_4_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_b_bits_payload (_noc_io_egress_4_flit_bits_payload[72:0]), // @[Tilelink.scala:452:34]
.io_flits_e_ready (_noc_io_ingress_4_flit_ready), // @[Protocol.scala:116:19]
.io_flits_e_valid (_nif_master_be_4_io_flits_e_valid),
.io_flits_e_bits_head (_nif_master_be_4_io_flits_e_bits_head),
.io_flits_e_bits_payload (_nif_master_be_4_io_flits_e_bits_payload),
.io_flits_e_bits_egress_id (_nif_master_be_4_io_flits_e_bits_egress_id)
); // @[Tilelink.scala:435:35]
TLMasterBEToNoC_1 nif_master_be_5 ( // @[Tilelink.scala:435:35]
.clock (clock),
.reset (reset),
.io_tilelink_b_ready (io_protocol_0_in_5_b_ready),
.io_tilelink_b_valid (io_protocol_0_in_5_b_valid),
.io_tilelink_b_bits_opcode (io_protocol_0_in_5_b_bits_opcode),
.io_tilelink_b_bits_param (io_protocol_0_in_5_b_bits_param),
.io_tilelink_b_bits_size (io_protocol_0_in_5_b_bits_size),
.io_tilelink_b_bits_source (_nif_master_be_5_io_tilelink_b_bits_source),
.io_tilelink_b_bits_address (io_protocol_0_in_5_b_bits_address),
.io_tilelink_b_bits_mask (io_protocol_0_in_5_b_bits_mask),
.io_tilelink_b_bits_data (io_protocol_0_in_5_b_bits_data),
.io_tilelink_b_bits_corrupt (io_protocol_0_in_5_b_bits_corrupt),
.io_tilelink_e_ready (io_protocol_0_in_5_e_ready),
.io_tilelink_e_valid (io_protocol_0_in_5_e_valid),
.io_tilelink_e_bits_sink (io_protocol_0_in_5_e_bits_sink),
.io_flits_b_ready (_nif_master_be_5_io_flits_b_ready),
.io_flits_b_valid (_noc_io_egress_5_flit_valid), // @[Protocol.scala:116:19]
.io_flits_b_bits_head (_noc_io_egress_5_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_b_bits_tail (_noc_io_egress_5_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_b_bits_payload (_noc_io_egress_5_flit_bits_payload[72:0]), // @[Tilelink.scala:452:34]
.io_flits_e_ready (_noc_io_ingress_5_flit_ready), // @[Protocol.scala:116:19]
.io_flits_e_valid (_nif_master_be_5_io_flits_e_valid),
.io_flits_e_bits_head (_nif_master_be_5_io_flits_e_bits_head),
.io_flits_e_bits_payload (_nif_master_be_5_io_flits_e_bits_payload),
.io_flits_e_bits_egress_id (_nif_master_be_5_io_flits_e_bits_egress_id)
); // @[Tilelink.scala:435:35]
TLMasterBEToNoC_1 nif_master_be_6 ( // @[Tilelink.scala:435:35]
.clock (clock),
.reset (reset),
.io_tilelink_b_ready (io_protocol_0_in_6_b_ready),
.io_tilelink_b_valid (io_protocol_0_in_6_b_valid),
.io_tilelink_b_bits_opcode (io_protocol_0_in_6_b_bits_opcode),
.io_tilelink_b_bits_param (io_protocol_0_in_6_b_bits_param),
.io_tilelink_b_bits_size (io_protocol_0_in_6_b_bits_size),
.io_tilelink_b_bits_source (_nif_master_be_6_io_tilelink_b_bits_source),
.io_tilelink_b_bits_address (io_protocol_0_in_6_b_bits_address),
.io_tilelink_b_bits_mask (io_protocol_0_in_6_b_bits_mask),
.io_tilelink_b_bits_data (io_protocol_0_in_6_b_bits_data),
.io_tilelink_b_bits_corrupt (io_protocol_0_in_6_b_bits_corrupt),
.io_tilelink_e_ready (io_protocol_0_in_6_e_ready),
.io_tilelink_e_valid (io_protocol_0_in_6_e_valid),
.io_tilelink_e_bits_sink (io_protocol_0_in_6_e_bits_sink),
.io_flits_b_ready (_nif_master_be_6_io_flits_b_ready),
.io_flits_b_valid (_noc_io_egress_6_flit_valid), // @[Protocol.scala:116:19]
.io_flits_b_bits_head (_noc_io_egress_6_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_b_bits_tail (_noc_io_egress_6_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_b_bits_payload (_noc_io_egress_6_flit_bits_payload[72:0]), // @[Tilelink.scala:452:34]
.io_flits_e_ready (_noc_io_ingress_6_flit_ready), // @[Protocol.scala:116:19]
.io_flits_e_valid (_nif_master_be_6_io_flits_e_valid),
.io_flits_e_bits_head (_nif_master_be_6_io_flits_e_bits_head),
.io_flits_e_bits_payload (_nif_master_be_6_io_flits_e_bits_payload),
.io_flits_e_bits_egress_id (_nif_master_be_6_io_flits_e_bits_egress_id)
); // @[Tilelink.scala:435:35]
TLMasterBEToNoC_1 nif_master_be_7 ( // @[Tilelink.scala:435:35]
.clock (clock),
.reset (reset),
.io_tilelink_b_ready (io_protocol_0_in_7_b_ready),
.io_tilelink_b_valid (io_protocol_0_in_7_b_valid),
.io_tilelink_b_bits_opcode (io_protocol_0_in_7_b_bits_opcode),
.io_tilelink_b_bits_param (io_protocol_0_in_7_b_bits_param),
.io_tilelink_b_bits_size (io_protocol_0_in_7_b_bits_size),
.io_tilelink_b_bits_source (_nif_master_be_7_io_tilelink_b_bits_source),
.io_tilelink_b_bits_address (io_protocol_0_in_7_b_bits_address),
.io_tilelink_b_bits_mask (io_protocol_0_in_7_b_bits_mask),
.io_tilelink_b_bits_data (io_protocol_0_in_7_b_bits_data),
.io_tilelink_b_bits_corrupt (io_protocol_0_in_7_b_bits_corrupt),
.io_tilelink_e_ready (io_protocol_0_in_7_e_ready),
.io_tilelink_e_valid (io_protocol_0_in_7_e_valid),
.io_tilelink_e_bits_sink (io_protocol_0_in_7_e_bits_sink),
.io_flits_b_ready (_nif_master_be_7_io_flits_b_ready),
.io_flits_b_valid (_noc_io_egress_7_flit_valid), // @[Protocol.scala:116:19]
.io_flits_b_bits_head (_noc_io_egress_7_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_b_bits_tail (_noc_io_egress_7_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_b_bits_payload (_noc_io_egress_7_flit_bits_payload[72:0]), // @[Tilelink.scala:452:34]
.io_flits_e_ready (_noc_io_ingress_7_flit_ready), // @[Protocol.scala:116:19]
.io_flits_e_valid (_nif_master_be_7_io_flits_e_valid),
.io_flits_e_bits_head (_nif_master_be_7_io_flits_e_bits_head),
.io_flits_e_bits_payload (_nif_master_be_7_io_flits_e_bits_payload),
.io_flits_e_bits_egress_id (_nif_master_be_7_io_flits_e_bits_egress_id)
); // @[Tilelink.scala:435:35]
TLMasterBEToNoC_1 nif_master_be_8 ( // @[Tilelink.scala:435:35]
.clock (clock),
.reset (reset),
.io_tilelink_b_ready (io_protocol_0_in_8_b_ready),
.io_tilelink_b_valid (io_protocol_0_in_8_b_valid),
.io_tilelink_b_bits_opcode (io_protocol_0_in_8_b_bits_opcode),
.io_tilelink_b_bits_param (io_protocol_0_in_8_b_bits_param),
.io_tilelink_b_bits_size (io_protocol_0_in_8_b_bits_size),
.io_tilelink_b_bits_source (_nif_master_be_8_io_tilelink_b_bits_source),
.io_tilelink_b_bits_address (io_protocol_0_in_8_b_bits_address),
.io_tilelink_b_bits_mask (io_protocol_0_in_8_b_bits_mask),
.io_tilelink_b_bits_data (io_protocol_0_in_8_b_bits_data),
.io_tilelink_b_bits_corrupt (io_protocol_0_in_8_b_bits_corrupt),
.io_tilelink_e_ready (io_protocol_0_in_8_e_ready),
.io_tilelink_e_valid (io_protocol_0_in_8_e_valid),
.io_tilelink_e_bits_sink (io_protocol_0_in_8_e_bits_sink),
.io_flits_b_ready (_nif_master_be_8_io_flits_b_ready),
.io_flits_b_valid (_noc_io_egress_8_flit_valid), // @[Protocol.scala:116:19]
.io_flits_b_bits_head (_noc_io_egress_8_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_b_bits_tail (_noc_io_egress_8_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_b_bits_payload (_noc_io_egress_8_flit_bits_payload[72:0]), // @[Tilelink.scala:452:34]
.io_flits_e_ready (_noc_io_ingress_8_flit_ready), // @[Protocol.scala:116:19]
.io_flits_e_valid (_nif_master_be_8_io_flits_e_valid),
.io_flits_e_bits_head (_nif_master_be_8_io_flits_e_bits_head),
.io_flits_e_bits_payload (_nif_master_be_8_io_flits_e_bits_payload),
.io_flits_e_bits_egress_id (_nif_master_be_8_io_flits_e_bits_egress_id)
); // @[Tilelink.scala:435:35]
TLSlaveBEToNoC nif_slave_be ( // @[Tilelink.scala:455:34]
.clock (clock),
.reset (reset),
.io_flits_b_ready (_noc_io_ingress_9_flit_ready), // @[Protocol.scala:116:19]
.io_flits_b_valid (_nif_slave_be_io_flits_b_valid),
.io_flits_b_bits_head (_nif_slave_be_io_flits_b_bits_head),
.io_flits_b_bits_tail (_nif_slave_be_io_flits_b_bits_tail),
.io_flits_b_bits_payload (_nif_slave_be_io_flits_b_bits_payload),
.io_flits_b_bits_egress_id (_nif_slave_be_io_flits_b_bits_egress_id),
.io_flits_e_ready (_nif_slave_be_io_flits_e_ready),
.io_flits_e_valid (_noc_io_egress_9_flit_valid), // @[Protocol.scala:116:19]
.io_flits_e_bits_head (_noc_io_egress_9_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_e_bits_tail (_noc_io_egress_9_flit_bits_tail) // @[Protocol.scala:116:19]
); // @[Tilelink.scala:455:34]
TLSlaveBEToNoC_1 nif_slave_be_1 ( // @[Tilelink.scala:455:34]
.clock (clock),
.reset (reset),
.io_tilelink_b_ready (io_protocol_0_out_1_b_ready),
.io_tilelink_b_valid (io_protocol_0_out_1_b_valid),
.io_tilelink_b_bits_param (io_protocol_0_out_1_b_bits_param),
.io_tilelink_b_bits_source (io_protocol_0_out_1_b_bits_source),
.io_tilelink_b_bits_address (io_protocol_0_out_1_b_bits_address),
.io_tilelink_e_valid (io_protocol_0_out_1_e_valid),
.io_tilelink_e_bits_sink (_nif_slave_be_1_io_tilelink_e_bits_sink),
.io_flits_b_ready (_noc_io_ingress_10_flit_ready), // @[Protocol.scala:116:19]
.io_flits_b_valid (_nif_slave_be_1_io_flits_b_valid),
.io_flits_b_bits_head (_nif_slave_be_1_io_flits_b_bits_head),
.io_flits_b_bits_tail (_nif_slave_be_1_io_flits_b_bits_tail),
.io_flits_b_bits_payload (_nif_slave_be_1_io_flits_b_bits_payload),
.io_flits_b_bits_egress_id (_nif_slave_be_1_io_flits_b_bits_egress_id),
.io_flits_e_valid (_noc_io_egress_10_flit_valid), // @[Protocol.scala:116:19]
.io_flits_e_bits_head (_noc_io_egress_10_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_e_bits_tail (_noc_io_egress_10_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_e_bits_payload (_noc_io_egress_10_flit_bits_payload[72:0]) // @[Tilelink.scala:471:33]
); // @[Tilelink.scala:455:34]
TLSlaveBEToNoC_1 nif_slave_be_2 ( // @[Tilelink.scala:455:34]
.clock (clock),
.reset (reset),
.io_tilelink_b_ready (io_protocol_0_out_2_b_ready),
.io_tilelink_b_valid (io_protocol_0_out_2_b_valid),
.io_tilelink_b_bits_param (io_protocol_0_out_2_b_bits_param),
.io_tilelink_b_bits_source (io_protocol_0_out_2_b_bits_source),
.io_tilelink_b_bits_address (io_protocol_0_out_2_b_bits_address),
.io_tilelink_e_valid (io_protocol_0_out_2_e_valid),
.io_tilelink_e_bits_sink (_nif_slave_be_2_io_tilelink_e_bits_sink),
.io_flits_b_ready (_noc_io_ingress_11_flit_ready), // @[Protocol.scala:116:19]
.io_flits_b_valid (_nif_slave_be_2_io_flits_b_valid),
.io_flits_b_bits_head (_nif_slave_be_2_io_flits_b_bits_head),
.io_flits_b_bits_tail (_nif_slave_be_2_io_flits_b_bits_tail),
.io_flits_b_bits_payload (_nif_slave_be_2_io_flits_b_bits_payload),
.io_flits_b_bits_egress_id (_nif_slave_be_2_io_flits_b_bits_egress_id),
.io_flits_e_valid (_noc_io_egress_11_flit_valid), // @[Protocol.scala:116:19]
.io_flits_e_bits_head (_noc_io_egress_11_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_e_bits_tail (_noc_io_egress_11_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_e_bits_payload (_noc_io_egress_11_flit_bits_payload[72:0]) // @[Tilelink.scala:471:33]
); // @[Tilelink.scala:455:34]
TLSlaveBEToNoC_1 nif_slave_be_3 ( // @[Tilelink.scala:455:34]
.clock (clock),
.reset (reset),
.io_tilelink_b_ready (io_protocol_0_out_3_b_ready),
.io_tilelink_b_valid (io_protocol_0_out_3_b_valid),
.io_tilelink_b_bits_param (io_protocol_0_out_3_b_bits_param),
.io_tilelink_b_bits_source (io_protocol_0_out_3_b_bits_source),
.io_tilelink_b_bits_address (io_protocol_0_out_3_b_bits_address),
.io_tilelink_e_valid (io_protocol_0_out_3_e_valid),
.io_tilelink_e_bits_sink (_nif_slave_be_3_io_tilelink_e_bits_sink),
.io_flits_b_ready (_noc_io_ingress_12_flit_ready), // @[Protocol.scala:116:19]
.io_flits_b_valid (_nif_slave_be_3_io_flits_b_valid),
.io_flits_b_bits_head (_nif_slave_be_3_io_flits_b_bits_head),
.io_flits_b_bits_tail (_nif_slave_be_3_io_flits_b_bits_tail),
.io_flits_b_bits_payload (_nif_slave_be_3_io_flits_b_bits_payload),
.io_flits_b_bits_egress_id (_nif_slave_be_3_io_flits_b_bits_egress_id),
.io_flits_e_valid (_noc_io_egress_12_flit_valid), // @[Protocol.scala:116:19]
.io_flits_e_bits_head (_noc_io_egress_12_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_e_bits_tail (_noc_io_egress_12_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_e_bits_payload (_noc_io_egress_12_flit_bits_payload[72:0]) // @[Tilelink.scala:471:33]
); // @[Tilelink.scala:455:34]
TLSlaveBEToNoC_1 nif_slave_be_4 ( // @[Tilelink.scala:455:34]
.clock (clock),
.reset (reset),
.io_tilelink_b_ready (io_protocol_0_out_4_b_ready),
.io_tilelink_b_valid (io_protocol_0_out_4_b_valid),
.io_tilelink_b_bits_param (io_protocol_0_out_4_b_bits_param),
.io_tilelink_b_bits_source (io_protocol_0_out_4_b_bits_source),
.io_tilelink_b_bits_address (io_protocol_0_out_4_b_bits_address),
.io_tilelink_e_valid (io_protocol_0_out_4_e_valid),
.io_tilelink_e_bits_sink (_nif_slave_be_4_io_tilelink_e_bits_sink),
.io_flits_b_ready (_noc_io_ingress_13_flit_ready), // @[Protocol.scala:116:19]
.io_flits_b_valid (_nif_slave_be_4_io_flits_b_valid),
.io_flits_b_bits_head (_nif_slave_be_4_io_flits_b_bits_head),
.io_flits_b_bits_tail (_nif_slave_be_4_io_flits_b_bits_tail),
.io_flits_b_bits_payload (_nif_slave_be_4_io_flits_b_bits_payload),
.io_flits_b_bits_egress_id (_nif_slave_be_4_io_flits_b_bits_egress_id),
.io_flits_e_valid (_noc_io_egress_13_flit_valid), // @[Protocol.scala:116:19]
.io_flits_e_bits_head (_noc_io_egress_13_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_e_bits_tail (_noc_io_egress_13_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_e_bits_payload (_noc_io_egress_13_flit_bits_payload[72:0]) // @[Tilelink.scala:471:33]
); // @[Tilelink.scala:455:34]
assign io_protocol_0_in_8_b_bits_source = _nif_master_be_8_io_tilelink_b_bits_source[1:0]; // @[Tilelink.scala:238:32, :435:35]
assign io_protocol_0_in_7_b_bits_source = _nif_master_be_7_io_tilelink_b_bits_source[1:0]; // @[Tilelink.scala:238:32, :435:35]
assign io_protocol_0_in_6_b_bits_source = _nif_master_be_6_io_tilelink_b_bits_source[1:0]; // @[Tilelink.scala:238:32, :435:35]
assign io_protocol_0_in_5_b_bits_source = _nif_master_be_5_io_tilelink_b_bits_source[1:0]; // @[Tilelink.scala:238:32, :435:35]
assign io_protocol_0_in_4_b_bits_source = _nif_master_be_4_io_tilelink_b_bits_source[1:0]; // @[Tilelink.scala:238:32, :435:35]
assign io_protocol_0_in_3_b_bits_source = _nif_master_be_3_io_tilelink_b_bits_source[1:0]; // @[Tilelink.scala:238:32, :435:35]
assign io_protocol_0_in_2_b_bits_source = _nif_master_be_2_io_tilelink_b_bits_source[1:0]; // @[Tilelink.scala:238:32, :435:35]
assign io_protocol_0_in_1_b_bits_source = _nif_master_be_1_io_tilelink_b_bits_source[1:0]; // @[Tilelink.scala:238:32, :435:35]
assign io_protocol_0_out_4_e_bits_sink = _nif_slave_be_4_io_tilelink_e_bits_sink[2:0]; // @[Tilelink.scala:238:32, :455:34]
assign io_protocol_0_out_3_e_bits_sink = _nif_slave_be_3_io_tilelink_e_bits_sink[2:0]; // @[Tilelink.scala:238:32, :455:34]
assign io_protocol_0_out_2_e_bits_sink = _nif_slave_be_2_io_tilelink_e_bits_sink[2:0]; // @[Tilelink.scala:238:32, :455:34]
assign io_protocol_0_out_1_e_bits_sink = _nif_slave_be_1_io_tilelink_e_bits_sink[2:0]; // @[Tilelink.scala:238:32, :455:34]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
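// Illustrative usage sketch (the names `enq`, `deq`, and `credit_ok` below are
// hypothetical): gather every handshake term once, then derive each ready/valid
// by excluding that signal's own term, which is exactly what fire(exclude) computes.
//
//   val helper = DecoupledHelper(enq.valid, deq.ready, credit_ok)
//   enq.ready := helper.fire(enq.valid)   // AND of all terms except enq.valid
//   deq.valid := helper.fire(deq.ready)   // AND of all terms except deq.ready
//   when (helper.fire()) { /* a transfer occurs this cycle */ }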
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
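// Illustrative sketch (the selector `sel` is hypothetical): MuxTLookup returns the
// default tuple unless a key matches, muxing each tuple element independently.
//
//   val (opcode, writesRd) = MuxTLookup(sel, (0.U, false.B), Seq(
//     1.U -> (10.U, true.B),
//     2.U -> (20.U, false.B)))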
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
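// Illustrative sketch (assuming a 4-byte beat and a naturally aligned access):
// MaskGen produces a per-byte mask for an access of 2^lgSize bytes at byte
// offset addr_lo within the beat.
//
//   val mask = MaskGen(addr_lo = 2.U, lgSize = 1.U, beatBytes = 4)
//   // a 2-byte access at offset 2 covers bytes 3 and 2, so mask bits 3 and 2 are set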
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
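  // Illustrative sketches of the extension methods above; the values shown are the
  // expected results for the stated widths.
  //
  //   "b0011".U(4.W).rotateRight(1)   // => "b1001": bit 0 wraps around to bit 3
  //   5.U(3.W).addWrap(6.U(3.W), 8)   // => 3.U, i.e. (5 + 6) % 8
  //   2.U(3.W).subWrap(5.U(3.W), 8)   // => 5.U, i.e. (2 - 5) mod 8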
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
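  // Illustrative sketch: leftOR propagates each set bit toward the MSB, rightOR
  // toward the LSB, e.g. for a 5-bit input:
  //
  //   leftOR("b00100".U(5.W))    // => "b11100"
  //   rightOR("b00100".U(5.W))   // => "b00111"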
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
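  // Illustrative sketch: unlike Seq.groupBy, the result preserves first-seen key
  // order, so generated hardware does not depend on hash ordering.
  //
  //   groupByIntoSeq(Seq(1, 2, 3, 4, 5))(_ % 2)
  //   // => List(1 -> List(1, 3, 5), 0 -> List(2, 4))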
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Replacement.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import freechips.rocketchip.util.property.cover
abstract class ReplacementPolicy {
def nBits: Int
def perSet: Boolean
def way: UInt
def miss: Unit
def hit: Unit
def access(touch_way: UInt): Unit
def access(touch_ways: Seq[Valid[UInt]]): Unit
def state_read: UInt
def get_next_state(state: UInt, touch_way: UInt): UInt
def get_next_state(state: UInt, touch_ways: Seq[Valid[UInt]]): UInt = {
touch_ways.foldLeft(state)((prev, touch_way) => Mux(touch_way.valid, get_next_state(prev, touch_way.bits), prev))
}
def get_replace_way(state: UInt): UInt
}
object ReplacementPolicy {
def fromString(s: String, n_ways: Int): ReplacementPolicy = s.toLowerCase match {
case "random" => new RandomReplacement(n_ways)
case "lru" => new TrueLRU(n_ways)
case "plru" => new PseudoLRU(n_ways)
case t => throw new IllegalArgumentException(s"unknown Replacement Policy type $t")
}
}
class RandomReplacement(n_ways: Int) extends ReplacementPolicy {
private val replace = Wire(Bool())
replace := false.B
def nBits = 16
def perSet = false
private val lfsr = LFSR(nBits, replace)
def state_read = WireDefault(lfsr)
def way = Random(n_ways, lfsr)
def miss = replace := true.B
def hit = {}
def access(touch_way: UInt) = {}
def access(touch_ways: Seq[Valid[UInt]]) = {}
def get_next_state(state: UInt, touch_way: UInt) = 0.U //DontCare
def get_replace_way(state: UInt) = way
}
abstract class SeqReplacementPolicy {
def access(set: UInt): Unit
def update(valid: Bool, hit: Bool, set: UInt, way: UInt): Unit
def way: UInt
}
abstract class SetAssocReplacementPolicy {
def access(set: UInt, touch_way: UInt): Unit
def access(sets: Seq[UInt], touch_ways: Seq[Valid[UInt]]): Unit
def way(set: UInt): UInt
}
class SeqRandom(n_ways: Int) extends SeqReplacementPolicy {
val logic = new RandomReplacement(n_ways)
def access(set: UInt) = { }
def update(valid: Bool, hit: Bool, set: UInt, way: UInt) = {
when (valid && !hit) { logic.miss }
}
def way = logic.way
}
class TrueLRU(n_ways: Int) extends ReplacementPolicy {
  // True LRU replacement policy, using a triangular matrix to track which ways are more recently used than others.
// The matrix is packed into a single UInt (or Bits). Example 4-way (6-bits):
// [5] - 3 more recent than 2
// [4] - 3 more recent than 1
// [3] - 2 more recent than 1
// [2] - 3 more recent than 0
// [1] - 2 more recent than 0
// [0] - 1 more recent than 0
def nBits = (n_ways * (n_ways-1)) / 2
def perSet = true
private val state_reg = RegInit(0.U(nBits.W))
def state_read = WireDefault(state_reg)
private def extractMRUVec(state: UInt): Seq[UInt] = {
// Extract per-way information about which higher-indexed ways are more recently used
val moreRecentVec = Wire(Vec(n_ways-1, UInt(n_ways.W)))
var lsb = 0
for (i <- 0 until n_ways-1) {
moreRecentVec(i) := Cat(state(lsb+n_ways-i-2,lsb), 0.U((i+1).W))
lsb = lsb + (n_ways - i - 1)
}
moreRecentVec
}
def get_next_state(state: UInt, touch_way: UInt): UInt = {
val nextState = Wire(Vec(n_ways-1, UInt(n_ways.W)))
val moreRecentVec = extractMRUVec(state) // reconstruct lower triangular matrix
val wayDec = UIntToOH(touch_way, n_ways)
// Compute next value of triangular matrix
// set the touched way as more recent than every other way
nextState.zipWithIndex.map { case (e, i) =>
e := Mux(i.U === touch_way, 0.U(n_ways.W), moreRecentVec(i) | wayDec)
}
nextState.zipWithIndex.tail.foldLeft((nextState.head.apply(n_ways-1,1),0)) { case ((pe,pi),(ce,ci)) => (Cat(ce.apply(n_ways-1,ci+1), pe), ci) }._1
}
def access(touch_way: UInt): Unit = {
state_reg := get_next_state(state_reg, touch_way)
}
def access(touch_ways: Seq[Valid[UInt]]): Unit = {
when (touch_ways.map(_.valid).orR) {
state_reg := get_next_state(state_reg, touch_ways)
}
for (i <- 1 until touch_ways.size) {
cover(PopCount(touch_ways.map(_.valid)) === i.U, s"LRU_UpdateCount$i", s"LRU Update $i simultaneous")
}
}
def get_replace_way(state: UInt): UInt = {
val moreRecentVec = extractMRUVec(state) // reconstruct lower triangular matrix
// For each way, determine if all other ways are more recent
val mruWayDec = (0 until n_ways).map { i =>
val upperMoreRecent = (if (i == n_ways-1) true.B else moreRecentVec(i).apply(n_ways-1,i+1).andR)
val lowerMoreRecent = (if (i == 0) true.B else moreRecentVec.map(e => !e(i)).reduce(_ && _))
upperMoreRecent && lowerMoreRecent
}
OHToUInt(mruWayDec)
}
def way = get_replace_way(state_reg)
def miss = access(way)
def hit = {}
@deprecated("replace 'replace' with 'way' from abstract class ReplacementPolicy","Rocket Chip 2020.05")
def replace: UInt = way
}
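// Worked example of the triangular-matrix encoding above (4 ways, 6 state bits):
// state = 0 means no higher-indexed way is marked more recent than a lower one,
// so way 3 is least recently used and get_replace_way(0.U) = 3. Conversely
// state = "b111111".U marks ways 3, 2 and 1 as more recent than every lower way,
// so get_replace_way("b111111".U) = 0. Each access(touch_way) marks touch_way as
// more recent than all other ways.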
class PseudoLRU(n_ways: Int) extends ReplacementPolicy {
// Pseudo-LRU tree algorithm: https://en.wikipedia.org/wiki/Pseudo-LRU#Tree-PLRU
//
//
// - bits storage example for 4-way PLRU binary tree:
// bit[2]: ways 3+2 older than ways 1+0
// / \
// bit[1]: way 3 older than way 2 bit[0]: way 1 older than way 0
//
//
// - bits storage example for 3-way PLRU binary tree:
// bit[1]: way 2 older than ways 1+0
// \
// bit[0]: way 1 older than way 0
//
//
// - bits storage example for 8-way PLRU binary tree:
// bit[6]: ways 7-4 older than ways 3-0
// / \
// bit[5]: ways 7+6 > 5+4 bit[2]: ways 3+2 > 1+0
// / \ / \
// bit[4]: way 7>6 bit[3]: way 5>4 bit[1]: way 3>2 bit[0]: way 1>0
def nBits = n_ways - 1
def perSet = true
private val state_reg = if (nBits == 0) Reg(UInt(0.W)) else RegInit(0.U(nBits.W))
def state_read = WireDefault(state_reg)
def access(touch_way: UInt): Unit = {
state_reg := get_next_state(state_reg, touch_way)
}
def access(touch_ways: Seq[Valid[UInt]]): Unit = {
when (touch_ways.map(_.valid).orR) {
state_reg := get_next_state(state_reg, touch_ways)
}
for (i <- 1 until touch_ways.size) {
cover(PopCount(touch_ways.map(_.valid)) === i.U, s"PLRU_UpdateCount$i", s"PLRU Update $i simultaneous")
}
}
/** @param state state_reg bits for this sub-tree
* @param touch_way touched way encoded value bits for this sub-tree
* @param tree_nways number of ways in this sub-tree
*/
def get_next_state(state: UInt, touch_way: UInt, tree_nways: Int): UInt = {
require(state.getWidth == (tree_nways-1), s"wrong state bits width ${state.getWidth} for $tree_nways ways")
require(touch_way.getWidth == (log2Ceil(tree_nways) max 1), s"wrong encoded way width ${touch_way.getWidth} for $tree_nways ways")
if (tree_nways > 2) {
// we are at a branching node in the tree, so recurse
val right_nways: Int = 1 << (log2Ceil(tree_nways) - 1) // number of ways in the right sub-tree
val left_nways: Int = tree_nways - right_nways // number of ways in the left sub-tree
val set_left_older = !touch_way(log2Ceil(tree_nways)-1)
val left_subtree_state = state.extract(tree_nways-3, right_nways-1)
val right_subtree_state = state(right_nways-2, 0)
if (left_nways > 1) {
// we are at a branching node in the tree with both left and right sub-trees, so recurse both sub-trees
Cat(set_left_older,
Mux(set_left_older,
left_subtree_state, // if setting left sub-tree as older, do NOT recurse into left sub-tree
get_next_state(left_subtree_state, touch_way.extract(log2Ceil(left_nways)-1,0), left_nways)), // recurse left if newer
Mux(set_left_older,
get_next_state(right_subtree_state, touch_way(log2Ceil(right_nways)-1,0), right_nways), // recurse right if newer
right_subtree_state)) // if setting right sub-tree as older, do NOT recurse into right sub-tree
} else {
// we are at a branching node in the tree with only a right sub-tree, so recurse only right sub-tree
Cat(set_left_older,
Mux(set_left_older,
get_next_state(right_subtree_state, touch_way(log2Ceil(right_nways)-1,0), right_nways), // recurse right if newer
right_subtree_state)) // if setting right sub-tree as older, do NOT recurse into right sub-tree
}
} else if (tree_nways == 2) {
// we are at a leaf node at the end of the tree, so set the single state bit opposite of the lsb of the touched way encoded value
!touch_way(0)
} else { // tree_nways <= 1
// we are at an empty node in an empty tree for 1 way, so return single zero bit for Chisel (no zero-width wires)
0.U(1.W)
}
}
def get_next_state(state: UInt, touch_way: UInt): UInt = {
val touch_way_sized = if (touch_way.getWidth < log2Ceil(n_ways)) touch_way.padTo (log2Ceil(n_ways))
else touch_way.extract(log2Ceil(n_ways)-1,0)
get_next_state(state, touch_way_sized, n_ways)
}
/** @param state state_reg bits for this sub-tree
* @param tree_nways number of ways in this sub-tree
*/
def get_replace_way(state: UInt, tree_nways: Int): UInt = {
require(state.getWidth == (tree_nways-1), s"wrong state bits width ${state.getWidth} for $tree_nways ways")
// this algorithm recursively descends the binary tree, filling in the way-to-replace encoded value from msb to lsb
if (tree_nways > 2) {
// we are at a branching node in the tree, so recurse
val right_nways: Int = 1 << (log2Ceil(tree_nways) - 1) // number of ways in the right sub-tree
val left_nways: Int = tree_nways - right_nways // number of ways in the left sub-tree
val left_subtree_older = state(tree_nways-2)
val left_subtree_state = state.extract(tree_nways-3, right_nways-1)
val right_subtree_state = state(right_nways-2, 0)
if (left_nways > 1) {
// we are at a branching node in the tree with both left and right sub-trees, so recurse both sub-trees
Cat(left_subtree_older, // return the top state bit (current tree node) as msb of the way-to-replace encoded value
Mux(left_subtree_older, // if left sub-tree is older, recurse left, else recurse right
get_replace_way(left_subtree_state, left_nways), // recurse left
get_replace_way(right_subtree_state, right_nways))) // recurse right
} else {
// we are at a branching node in the tree with only a right sub-tree, so recurse only right sub-tree
Cat(left_subtree_older, // return the top state bit (current tree node) as msb of the way-to-replace encoded value
Mux(left_subtree_older, // if left sub-tree is older, return and do not recurse right
0.U(1.W),
get_replace_way(right_subtree_state, right_nways))) // recurse right
}
} else if (tree_nways == 2) {
// we are at a leaf node at the end of the tree, so just return the single state bit as lsb of the way-to-replace encoded value
state(0)
} else { // tree_nways <= 1
// we are at an empty node in an unbalanced tree for non-power-of-2 ways, so return single zero bit as lsb of the way-to-replace encoded value
0.U(1.W)
}
}
def get_replace_way(state: UInt): UInt = get_replace_way(state, n_ways)
def way = get_replace_way(state_reg)
def miss = access(way)
def hit = {}
}
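// Worked example (4 ways, 3 state bits, consistent with the 4-way PLRUTest
// assertions below): from state = 0 the replace way is 0; touching way 0 sets
// bit 2 (ways 3+2 now older than ways 1+0) and bit 0 (way 1 older than way 0),
// giving next state "b101" = 5, so the next victim from that state is way 2.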
class SeqPLRU(n_sets: Int, n_ways: Int) extends SeqReplacementPolicy {
val logic = new PseudoLRU(n_ways)
val state = SyncReadMem(n_sets, UInt(logic.nBits.W))
val current_state = Wire(UInt(logic.nBits.W))
val next_state = Wire(UInt(logic.nBits.W))
val plru_way = logic.get_replace_way(current_state)
def access(set: UInt) = {
current_state := state.read(set)
}
def update(valid: Bool, hit: Bool, set: UInt, way: UInt) = {
val update_way = Mux(hit, way, plru_way)
next_state := logic.get_next_state(current_state, update_way)
when (valid) { state.write(set, next_state) }
}
def way = plru_way
}
class SetAssocLRU(n_sets: Int, n_ways: Int, policy: String) extends SetAssocReplacementPolicy {
val logic = policy.toLowerCase match {
case "plru" => new PseudoLRU(n_ways)
case "lru" => new TrueLRU(n_ways)
case t => throw new IllegalArgumentException(s"unknown Replacement Policy type $t")
}
val state_vec =
if (logic.nBits == 0) Reg(Vec(n_sets, UInt(logic.nBits.W))) // Work around elaboration error on following line
else RegInit(VecInit(Seq.fill(n_sets)(0.U(logic.nBits.W))))
def access(set: UInt, touch_way: UInt) = {
state_vec(set) := logic.get_next_state(state_vec(set), touch_way)
}
def access(sets: Seq[UInt], touch_ways: Seq[Valid[UInt]]) = {
require(sets.size == touch_ways.size, "internal consistency check: should be same number of simultaneous updates for sets and touch_ways")
for (set <- 0 until n_sets) {
val set_touch_ways = (sets zip touch_ways).map { case (touch_set, touch_way) =>
Pipe(touch_way.valid && (touch_set === set.U), touch_way.bits, 0)}
when (set_touch_ways.map(_.valid).orR) {
state_vec(set) := logic.get_next_state(state_vec(set), set_touch_ways)
}
}
}
def way(set: UInt) = logic.get_replace_way(state_vec(set))
}
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class PLRUTest(n_ways: Int, timeout: Int = 500) extends UnitTest(timeout) {
val plru = new PseudoLRU(n_ways)
// step
io.finished := RegNext(true.B, false.B)
val get_replace_ways = (0 until (1 << (n_ways-1))).map(state =>
plru.get_replace_way(state = state.U((n_ways-1).W)))
val get_next_states = (0 until (1 << (n_ways-1))).map(state => (0 until n_ways).map(way =>
plru.get_next_state (state = state.U((n_ways-1).W), touch_way = way.U(log2Ceil(n_ways).W))))
n_ways match {
case 2 => {
assert(get_replace_ways(0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=0: expected=0 actual=%d", get_replace_ways(0))
assert(get_replace_ways(1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=1: expected=1 actual=%d", get_replace_ways(1))
assert(get_next_states(0)(0) === 1.U(plru.nBits.W), s"get_next_state state=0 way=0: expected=1 actual=%d", get_next_states(0)(0))
assert(get_next_states(0)(1) === 0.U(plru.nBits.W), s"get_next_state state=0 way=1: expected=0 actual=%d", get_next_states(0)(1))
assert(get_next_states(1)(0) === 1.U(plru.nBits.W), s"get_next_state state=1 way=0: expected=1 actual=%d", get_next_states(1)(0))
assert(get_next_states(1)(1) === 0.U(plru.nBits.W), s"get_next_state state=1 way=1: expected=0 actual=%d", get_next_states(1)(1))
}
case 3 => {
assert(get_replace_ways(0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=0: expected=0 actual=%d", get_replace_ways(0))
assert(get_replace_ways(1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=1: expected=1 actual=%d", get_replace_ways(1))
assert(get_replace_ways(2) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=2: expected=2 actual=%d", get_replace_ways(2))
assert(get_replace_ways(3) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=3: expected=2 actual=%d", get_replace_ways(3))
assert(get_next_states(0)(0) === 3.U(plru.nBits.W), s"get_next_state state=0 way=0: expected=3 actual=%d", get_next_states(0)(0))
assert(get_next_states(0)(1) === 2.U(plru.nBits.W), s"get_next_state state=0 way=1: expected=2 actual=%d", get_next_states(0)(1))
assert(get_next_states(0)(2) === 0.U(plru.nBits.W), s"get_next_state state=0 way=2: expected=0 actual=%d", get_next_states(0)(2))
assert(get_next_states(1)(0) === 3.U(plru.nBits.W), s"get_next_state state=1 way=0: expected=3 actual=%d", get_next_states(1)(0))
assert(get_next_states(1)(1) === 2.U(plru.nBits.W), s"get_next_state state=1 way=1: expected=2 actual=%d", get_next_states(1)(1))
assert(get_next_states(1)(2) === 1.U(plru.nBits.W), s"get_next_state state=1 way=2: expected=1 actual=%d", get_next_states(1)(2))
assert(get_next_states(2)(0) === 3.U(plru.nBits.W), s"get_next_state state=2 way=0: expected=3 actual=%d", get_next_states(2)(0))
assert(get_next_states(2)(1) === 2.U(plru.nBits.W), s"get_next_state state=2 way=1: expected=2 actual=%d", get_next_states(2)(1))
assert(get_next_states(2)(2) === 0.U(plru.nBits.W), s"get_next_state state=2 way=2: expected=0 actual=%d", get_next_states(2)(2))
assert(get_next_states(3)(0) === 3.U(plru.nBits.W), s"get_next_state state=3 way=0: expected=3 actual=%d", get_next_states(3)(0))
assert(get_next_states(3)(1) === 2.U(plru.nBits.W), s"get_next_state state=3 way=1: expected=2 actual=%d", get_next_states(3)(1))
assert(get_next_states(3)(2) === 1.U(plru.nBits.W), s"get_next_state state=3 way=2: expected=1 actual=%d", get_next_states(3)(2))
}
case 4 => {
assert(get_replace_ways(0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=0: expected=0 actual=%d", get_replace_ways(0))
assert(get_replace_ways(1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=1: expected=1 actual=%d", get_replace_ways(1))
assert(get_replace_ways(2) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=2: expected=0 actual=%d", get_replace_ways(2))
assert(get_replace_ways(3) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=3: expected=1 actual=%d", get_replace_ways(3))
assert(get_replace_ways(4) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=4: expected=2 actual=%d", get_replace_ways(4))
assert(get_replace_ways(5) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=5: expected=2 actual=%d", get_replace_ways(5))
assert(get_replace_ways(6) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=6: expected=3 actual=%d", get_replace_ways(6))
assert(get_replace_ways(7) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=7: expected=3 actual=%d", get_replace_ways(7))
assert(get_next_states(0)(0) === 5.U(plru.nBits.W), s"get_next_state state=0 way=0: expected=5 actual=%d", get_next_states(0)(0))
assert(get_next_states(0)(1) === 4.U(plru.nBits.W), s"get_next_state state=0 way=1: expected=4 actual=%d", get_next_states(0)(1))
assert(get_next_states(0)(2) === 2.U(plru.nBits.W), s"get_next_state state=0 way=2: expected=2 actual=%d", get_next_states(0)(2))
assert(get_next_states(0)(3) === 0.U(plru.nBits.W), s"get_next_state state=0 way=3: expected=0 actual=%d", get_next_states(0)(3))
assert(get_next_states(1)(0) === 5.U(plru.nBits.W), s"get_next_state state=1 way=0: expected=5 actual=%d", get_next_states(1)(0))
assert(get_next_states(1)(1) === 4.U(plru.nBits.W), s"get_next_state state=1 way=1: expected=4 actual=%d", get_next_states(1)(1))
assert(get_next_states(1)(2) === 3.U(plru.nBits.W), s"get_next_state state=1 way=2: expected=3 actual=%d", get_next_states(1)(2))
assert(get_next_states(1)(3) === 1.U(plru.nBits.W), s"get_next_state state=1 way=3: expected=1 actual=%d", get_next_states(1)(3))
assert(get_next_states(2)(0) === 7.U(plru.nBits.W), s"get_next_state state=2 way=0: expected=7 actual=%d", get_next_states(2)(0))
assert(get_next_states(2)(1) === 6.U(plru.nBits.W), s"get_next_state state=2 way=1: expected=6 actual=%d", get_next_states(2)(1))
assert(get_next_states(2)(2) === 2.U(plru.nBits.W), s"get_next_state state=2 way=2: expected=2 actual=%d", get_next_states(2)(2))
assert(get_next_states(2)(3) === 0.U(plru.nBits.W), s"get_next_state state=2 way=3: expected=0 actual=%d", get_next_states(2)(3))
assert(get_next_states(3)(0) === 7.U(plru.nBits.W), s"get_next_state state=3 way=0: expected=7 actual=%d", get_next_states(3)(0))
assert(get_next_states(3)(1) === 6.U(plru.nBits.W), s"get_next_state state=3 way=1: expected=6 actual=%d", get_next_states(3)(1))
assert(get_next_states(3)(2) === 3.U(plru.nBits.W), s"get_next_state state=3 way=2: expected=3 actual=%d", get_next_states(3)(2))
assert(get_next_states(3)(3) === 1.U(plru.nBits.W), s"get_next_state state=3 way=3: expected=1 actual=%d", get_next_states(3)(3))
assert(get_next_states(4)(0) === 5.U(plru.nBits.W), s"get_next_state state=4 way=0: expected=5 actual=%d", get_next_states(4)(0))
assert(get_next_states(4)(1) === 4.U(plru.nBits.W), s"get_next_state state=4 way=1: expected=4 actual=%d", get_next_states(4)(1))
assert(get_next_states(4)(2) === 2.U(plru.nBits.W), s"get_next_state state=4 way=2: expected=2 actual=%d", get_next_states(4)(2))
assert(get_next_states(4)(3) === 0.U(plru.nBits.W), s"get_next_state state=4 way=3: expected=0 actual=%d", get_next_states(4)(3))
assert(get_next_states(5)(0) === 5.U(plru.nBits.W), s"get_next_state state=5 way=0: expected=5 actual=%d", get_next_states(5)(0))
assert(get_next_states(5)(1) === 4.U(plru.nBits.W), s"get_next_state state=5 way=1: expected=4 actual=%d", get_next_states(5)(1))
assert(get_next_states(5)(2) === 3.U(plru.nBits.W), s"get_next_state state=5 way=2: expected=3 actual=%d", get_next_states(5)(2))
assert(get_next_states(5)(3) === 1.U(plru.nBits.W), s"get_next_state state=5 way=3: expected=1 actual=%d", get_next_states(5)(3))
assert(get_next_states(6)(0) === 7.U(plru.nBits.W), s"get_next_state state=6 way=0: expected=7 actual=%d", get_next_states(6)(0))
assert(get_next_states(6)(1) === 6.U(plru.nBits.W), s"get_next_state state=6 way=1: expected=6 actual=%d", get_next_states(6)(1))
assert(get_next_states(6)(2) === 2.U(plru.nBits.W), s"get_next_state state=6 way=2: expected=2 actual=%d", get_next_states(6)(2))
assert(get_next_states(6)(3) === 0.U(plru.nBits.W), s"get_next_state state=6 way=3: expected=0 actual=%d", get_next_states(6)(3))
assert(get_next_states(7)(0) === 7.U(plru.nBits.W), s"get_next_state state=7 way=0: expected=7 actual=%d", get_next_states(7)(0))
assert(get_next_states(7)(1) === 6.U(plru.nBits.W), s"get_next_state state=7 way=1: expected=6 actual=%d", get_next_states(7)(1))
assert(get_next_states(7)(2) === 3.U(plru.nBits.W), s"get_next_state state=7 way=2: expected=3 actual=%d", get_next_states(7)(2))
assert(get_next_states(7)(3) === 1.U(plru.nBits.W), s"get_next_state state=7 way=3: expected=1 actual=%d", get_next_states(7)(3))
}
case 5 => {
assert(get_replace_ways( 0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=00: expected=0 actual=%d", get_replace_ways( 0))
assert(get_replace_ways( 1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=01: expected=1 actual=%d", get_replace_ways( 1))
assert(get_replace_ways( 2) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=02: expected=0 actual=%d", get_replace_ways( 2))
assert(get_replace_ways( 3) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=03: expected=1 actual=%d", get_replace_ways( 3))
assert(get_replace_ways( 4) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=04: expected=2 actual=%d", get_replace_ways( 4))
assert(get_replace_ways( 5) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=05: expected=2 actual=%d", get_replace_ways( 5))
assert(get_replace_ways( 6) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=06: expected=3 actual=%d", get_replace_ways( 6))
assert(get_replace_ways( 7) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=07: expected=3 actual=%d", get_replace_ways( 7))
assert(get_replace_ways( 8) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=08: expected=4 actual=%d", get_replace_ways( 8))
assert(get_replace_ways( 9) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=09: expected=4 actual=%d", get_replace_ways( 9))
assert(get_replace_ways(10) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=10: expected=4 actual=%d", get_replace_ways(10))
assert(get_replace_ways(11) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=11: expected=4 actual=%d", get_replace_ways(11))
assert(get_replace_ways(12) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=12: expected=4 actual=%d", get_replace_ways(12))
assert(get_replace_ways(13) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=13: expected=4 actual=%d", get_replace_ways(13))
assert(get_replace_ways(14) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=14: expected=4 actual=%d", get_replace_ways(14))
assert(get_replace_ways(15) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=15: expected=4 actual=%d", get_replace_ways(15))
assert(get_next_states( 0)(0) === 13.U(plru.nBits.W), s"get_next_state state=00 way=0: expected=13 actual=%d", get_next_states( 0)(0))
assert(get_next_states( 0)(1) === 12.U(plru.nBits.W), s"get_next_state state=00 way=1: expected=12 actual=%d", get_next_states( 0)(1))
assert(get_next_states( 0)(2) === 10.U(plru.nBits.W), s"get_next_state state=00 way=2: expected=10 actual=%d", get_next_states( 0)(2))
assert(get_next_states( 0)(3) === 8.U(plru.nBits.W), s"get_next_state state=00 way=3: expected=08 actual=%d", get_next_states( 0)(3))
assert(get_next_states( 0)(4) === 0.U(plru.nBits.W), s"get_next_state state=00 way=4: expected=00 actual=%d", get_next_states( 0)(4))
assert(get_next_states( 1)(0) === 13.U(plru.nBits.W), s"get_next_state state=01 way=0: expected=13 actual=%d", get_next_states( 1)(0))
assert(get_next_states( 1)(1) === 12.U(plru.nBits.W), s"get_next_state state=01 way=1: expected=12 actual=%d", get_next_states( 1)(1))
assert(get_next_states( 1)(2) === 11.U(plru.nBits.W), s"get_next_state state=01 way=2: expected=11 actual=%d", get_next_states( 1)(2))
assert(get_next_states( 1)(3) === 9.U(plru.nBits.W), s"get_next_state state=01 way=3: expected=09 actual=%d", get_next_states( 1)(3))
assert(get_next_states( 1)(4) === 1.U(plru.nBits.W), s"get_next_state state=01 way=4: expected=01 actual=%d", get_next_states( 1)(4))
assert(get_next_states( 2)(0) === 15.U(plru.nBits.W), s"get_next_state state=02 way=0: expected=15 actual=%d", get_next_states( 2)(0))
assert(get_next_states( 2)(1) === 14.U(plru.nBits.W), s"get_next_state state=02 way=1: expected=14 actual=%d", get_next_states( 2)(1))
assert(get_next_states( 2)(2) === 10.U(plru.nBits.W), s"get_next_state state=02 way=2: expected=10 actual=%d", get_next_states( 2)(2))
assert(get_next_states( 2)(3) === 8.U(plru.nBits.W), s"get_next_state state=02 way=3: expected=08 actual=%d", get_next_states( 2)(3))
assert(get_next_states( 2)(4) === 2.U(plru.nBits.W), s"get_next_state state=02 way=4: expected=02 actual=%d", get_next_states( 2)(4))
assert(get_next_states( 3)(0) === 15.U(plru.nBits.W), s"get_next_state state=03 way=0: expected=15 actual=%d", get_next_states( 3)(0))
assert(get_next_states( 3)(1) === 14.U(plru.nBits.W), s"get_next_state state=03 way=1: expected=14 actual=%d", get_next_states( 3)(1))
assert(get_next_states( 3)(2) === 11.U(plru.nBits.W), s"get_next_state state=03 way=2: expected=11 actual=%d", get_next_states( 3)(2))
assert(get_next_states( 3)(3) === 9.U(plru.nBits.W), s"get_next_state state=03 way=3: expected=09 actual=%d", get_next_states( 3)(3))
assert(get_next_states( 3)(4) === 3.U(plru.nBits.W), s"get_next_state state=03 way=4: expected=03 actual=%d", get_next_states( 3)(4))
assert(get_next_states( 4)(0) === 13.U(plru.nBits.W), s"get_next_state state=04 way=0: expected=13 actual=%d", get_next_states( 4)(0))
assert(get_next_states( 4)(1) === 12.U(plru.nBits.W), s"get_next_state state=04 way=1: expected=12 actual=%d", get_next_states( 4)(1))
assert(get_next_states( 4)(2) === 10.U(plru.nBits.W), s"get_next_state state=04 way=2: expected=10 actual=%d", get_next_states( 4)(2))
assert(get_next_states( 4)(3) === 8.U(plru.nBits.W), s"get_next_state state=04 way=3: expected=08 actual=%d", get_next_states( 4)(3))
assert(get_next_states( 4)(4) === 4.U(plru.nBits.W), s"get_next_state state=04 way=4: expected=04 actual=%d", get_next_states( 4)(4))
assert(get_next_states( 5)(0) === 13.U(plru.nBits.W), s"get_next_state state=05 way=0: expected=13 actual=%d", get_next_states( 5)(0))
assert(get_next_states( 5)(1) === 12.U(plru.nBits.W), s"get_next_state state=05 way=1: expected=12 actual=%d", get_next_states( 5)(1))
assert(get_next_states( 5)(2) === 11.U(plru.nBits.W), s"get_next_state state=05 way=2: expected=11 actual=%d", get_next_states( 5)(2))
assert(get_next_states( 5)(3) === 9.U(plru.nBits.W), s"get_next_state state=05 way=3: expected=09 actual=%d", get_next_states( 5)(3))
assert(get_next_states( 5)(4) === 5.U(plru.nBits.W), s"get_next_state state=05 way=4: expected=05 actual=%d", get_next_states( 5)(4))
assert(get_next_states( 6)(0) === 15.U(plru.nBits.W), s"get_next_state state=06 way=0: expected=15 actual=%d", get_next_states( 6)(0))
assert(get_next_states( 6)(1) === 14.U(plru.nBits.W), s"get_next_state state=06 way=1: expected=14 actual=%d", get_next_states( 6)(1))
assert(get_next_states( 6)(2) === 10.U(plru.nBits.W), s"get_next_state state=06 way=2: expected=10 actual=%d", get_next_states( 6)(2))
assert(get_next_states( 6)(3) === 8.U(plru.nBits.W), s"get_next_state state=06 way=3: expected=08 actual=%d", get_next_states( 6)(3))
assert(get_next_states( 6)(4) === 6.U(plru.nBits.W), s"get_next_state state=06 way=4: expected=06 actual=%d", get_next_states( 6)(4))
assert(get_next_states( 7)(0) === 15.U(plru.nBits.W), s"get_next_state state=07 way=0: expected=15 actual=%d", get_next_states( 7)(0))
assert(get_next_states( 7)(1) === 14.U(plru.nBits.W), s"get_next_state state=07 way=1: expected=14 actual=%d", get_next_states( 7)(1))
assert(get_next_states( 7)(2) === 11.U(plru.nBits.W), s"get_next_state state=07 way=2: expected=11 actual=%d", get_next_states( 7)(2))
assert(get_next_states( 7)(3) === 9.U(plru.nBits.W), s"get_next_state state=07 way=3: expected=09 actual=%d", get_next_states( 7)(3))
assert(get_next_states( 7)(4) === 7.U(plru.nBits.W), s"get_next_state state=07 way=4: expected=07 actual=%d", get_next_states( 7)(4))
assert(get_next_states( 8)(0) === 13.U(plru.nBits.W), s"get_next_state state=08 way=0: expected=13 actual=%d", get_next_states( 8)(0))
assert(get_next_states( 8)(1) === 12.U(plru.nBits.W), s"get_next_state state=08 way=1: expected=12 actual=%d", get_next_states( 8)(1))
assert(get_next_states( 8)(2) === 10.U(plru.nBits.W), s"get_next_state state=08 way=2: expected=10 actual=%d", get_next_states( 8)(2))
assert(get_next_states( 8)(3) === 8.U(plru.nBits.W), s"get_next_state state=08 way=3: expected=08 actual=%d", get_next_states( 8)(3))
assert(get_next_states( 8)(4) === 0.U(plru.nBits.W), s"get_next_state state=08 way=4: expected=00 actual=%d", get_next_states( 8)(4))
assert(get_next_states( 9)(0) === 13.U(plru.nBits.W), s"get_next_state state=09 way=0: expected=13 actual=%d", get_next_states( 9)(0))
assert(get_next_states( 9)(1) === 12.U(plru.nBits.W), s"get_next_state state=09 way=1: expected=12 actual=%d", get_next_states( 9)(1))
assert(get_next_states( 9)(2) === 11.U(plru.nBits.W), s"get_next_state state=09 way=2: expected=11 actual=%d", get_next_states( 9)(2))
assert(get_next_states( 9)(3) === 9.U(plru.nBits.W), s"get_next_state state=09 way=3: expected=09 actual=%d", get_next_states( 9)(3))
assert(get_next_states( 9)(4) === 1.U(plru.nBits.W), s"get_next_state state=09 way=4: expected=01 actual=%d", get_next_states( 9)(4))
assert(get_next_states(10)(0) === 15.U(plru.nBits.W), s"get_next_state state=10 way=0: expected=15 actual=%d", get_next_states(10)(0))
assert(get_next_states(10)(1) === 14.U(plru.nBits.W), s"get_next_state state=10 way=1: expected=14 actual=%d", get_next_states(10)(1))
assert(get_next_states(10)(2) === 10.U(plru.nBits.W), s"get_next_state state=10 way=2: expected=10 actual=%d", get_next_states(10)(2))
assert(get_next_states(10)(3) === 8.U(plru.nBits.W), s"get_next_state state=10 way=3: expected=08 actual=%d", get_next_states(10)(3))
assert(get_next_states(10)(4) === 2.U(plru.nBits.W), s"get_next_state state=10 way=4: expected=02 actual=%d", get_next_states(10)(4))
assert(get_next_states(11)(0) === 15.U(plru.nBits.W), s"get_next_state state=11 way=0: expected=15 actual=%d", get_next_states(11)(0))
assert(get_next_states(11)(1) === 14.U(plru.nBits.W), s"get_next_state state=11 way=1: expected=14 actual=%d", get_next_states(11)(1))
assert(get_next_states(11)(2) === 11.U(plru.nBits.W), s"get_next_state state=11 way=2: expected=11 actual=%d", get_next_states(11)(2))
assert(get_next_states(11)(3) === 9.U(plru.nBits.W), s"get_next_state state=11 way=3: expected=09 actual=%d", get_next_states(11)(3))
assert(get_next_states(11)(4) === 3.U(plru.nBits.W), s"get_next_state state=11 way=4: expected=03 actual=%d", get_next_states(11)(4))
assert(get_next_states(12)(0) === 13.U(plru.nBits.W), s"get_next_state state=12 way=0: expected=13 actual=%d", get_next_states(12)(0))
assert(get_next_states(12)(1) === 12.U(plru.nBits.W), s"get_next_state state=12 way=1: expected=12 actual=%d", get_next_states(12)(1))
assert(get_next_states(12)(2) === 10.U(plru.nBits.W), s"get_next_state state=12 way=2: expected=10 actual=%d", get_next_states(12)(2))
assert(get_next_states(12)(3) === 8.U(plru.nBits.W), s"get_next_state state=12 way=3: expected=08 actual=%d", get_next_states(12)(3))
assert(get_next_states(12)(4) === 4.U(plru.nBits.W), s"get_next_state state=12 way=4: expected=04 actual=%d", get_next_states(12)(4))
assert(get_next_states(13)(0) === 13.U(plru.nBits.W), s"get_next_state state=13 way=0: expected=13 actual=%d", get_next_states(13)(0))
assert(get_next_states(13)(1) === 12.U(plru.nBits.W), s"get_next_state state=13 way=1: expected=12 actual=%d", get_next_states(13)(1))
assert(get_next_states(13)(2) === 11.U(plru.nBits.W), s"get_next_state state=13 way=2: expected=11 actual=%d", get_next_states(13)(2))
assert(get_next_states(13)(3) === 9.U(plru.nBits.W), s"get_next_state state=13 way=3: expected=09 actual=%d", get_next_states(13)(3))
assert(get_next_states(13)(4) === 5.U(plru.nBits.W), s"get_next_state state=13 way=4: expected=05 actual=%d", get_next_states(13)(4))
assert(get_next_states(14)(0) === 15.U(plru.nBits.W), s"get_next_state state=14 way=0: expected=15 actual=%d", get_next_states(14)(0))
assert(get_next_states(14)(1) === 14.U(plru.nBits.W), s"get_next_state state=14 way=1: expected=14 actual=%d", get_next_states(14)(1))
assert(get_next_states(14)(2) === 10.U(plru.nBits.W), s"get_next_state state=14 way=2: expected=10 actual=%d", get_next_states(14)(2))
assert(get_next_states(14)(3) === 8.U(plru.nBits.W), s"get_next_state state=14 way=3: expected=08 actual=%d", get_next_states(14)(3))
assert(get_next_states(14)(4) === 6.U(plru.nBits.W), s"get_next_state state=14 way=4: expected=06 actual=%d", get_next_states(14)(4))
assert(get_next_states(15)(0) === 15.U(plru.nBits.W), s"get_next_state state=15 way=0: expected=15 actual=%d", get_next_states(15)(0))
assert(get_next_states(15)(1) === 14.U(plru.nBits.W), s"get_next_state state=15 way=1: expected=14 actual=%d", get_next_states(15)(1))
assert(get_next_states(15)(2) === 11.U(plru.nBits.W), s"get_next_state state=15 way=2: expected=11 actual=%d", get_next_states(15)(2))
assert(get_next_states(15)(3) === 9.U(plru.nBits.W), s"get_next_state state=15 way=3: expected=09 actual=%d", get_next_states(15)(3))
assert(get_next_states(15)(4) === 7.U(plru.nBits.W), s"get_next_state state=15 way=4: expected=07 actual=%d", get_next_states(15)(4))
}
case 6 => {
assert(get_replace_ways( 0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=00: expected=0 actual=%d", get_replace_ways( 0))
assert(get_replace_ways( 1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=01: expected=1 actual=%d", get_replace_ways( 1))
assert(get_replace_ways( 2) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=02: expected=0 actual=%d", get_replace_ways( 2))
assert(get_replace_ways( 3) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=03: expected=1 actual=%d", get_replace_ways( 3))
assert(get_replace_ways( 4) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=04: expected=2 actual=%d", get_replace_ways( 4))
assert(get_replace_ways( 5) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=05: expected=2 actual=%d", get_replace_ways( 5))
assert(get_replace_ways( 6) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=06: expected=3 actual=%d", get_replace_ways( 6))
assert(get_replace_ways( 7) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=07: expected=3 actual=%d", get_replace_ways( 7))
assert(get_replace_ways( 8) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=08: expected=0 actual=%d", get_replace_ways( 8))
assert(get_replace_ways( 9) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=09: expected=1 actual=%d", get_replace_ways( 9))
assert(get_replace_ways(10) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=10: expected=0 actual=%d", get_replace_ways(10))
assert(get_replace_ways(11) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=11: expected=1 actual=%d", get_replace_ways(11))
assert(get_replace_ways(12) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=12: expected=2 actual=%d", get_replace_ways(12))
assert(get_replace_ways(13) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=13: expected=2 actual=%d", get_replace_ways(13))
assert(get_replace_ways(14) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=14: expected=3 actual=%d", get_replace_ways(14))
assert(get_replace_ways(15) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=15: expected=3 actual=%d", get_replace_ways(15))
assert(get_replace_ways(16) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=16: expected=4 actual=%d", get_replace_ways(16))
assert(get_replace_ways(17) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=17: expected=4 actual=%d", get_replace_ways(17))
assert(get_replace_ways(18) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=18: expected=4 actual=%d", get_replace_ways(18))
assert(get_replace_ways(19) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=19: expected=4 actual=%d", get_replace_ways(19))
assert(get_replace_ways(20) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=20: expected=4 actual=%d", get_replace_ways(20))
assert(get_replace_ways(21) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=21: expected=4 actual=%d", get_replace_ways(21))
assert(get_replace_ways(22) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=22: expected=4 actual=%d", get_replace_ways(22))
assert(get_replace_ways(23) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=23: expected=4 actual=%d", get_replace_ways(23))
assert(get_replace_ways(24) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=24: expected=5 actual=%d", get_replace_ways(24))
assert(get_replace_ways(25) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=25: expected=5 actual=%d", get_replace_ways(25))
assert(get_replace_ways(26) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=26: expected=5 actual=%d", get_replace_ways(26))
assert(get_replace_ways(27) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=27: expected=5 actual=%d", get_replace_ways(27))
assert(get_replace_ways(28) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=28: expected=5 actual=%d", get_replace_ways(28))
assert(get_replace_ways(29) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=29: expected=5 actual=%d", get_replace_ways(29))
assert(get_replace_ways(30) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=30: expected=5 actual=%d", get_replace_ways(30))
assert(get_replace_ways(31) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=31: expected=5 actual=%d", get_replace_ways(31))
}
case _ => throw new IllegalArgumentException(s"no test pattern found for n_ways=$n_ways")
}
}
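// Illustrative sketch (not part of the original test): a pure-Scala model of the
// 4-way tree-PLRU replace-way decode exercised by the `case 4` expectations above.
// State bit 2 selects which half to victimize, bit 0 picks within ways {0,1},
// bit 1 picks within ways {2,3}. The object name below is hypothetical.
object TreePLRU4WaySketch {
  def replaceWay(state: Int): Int = {
    require(state >= 0 && state < 8)
    if (((state >> 2) & 1) == 0) state & 1  // root points left: victim is way 0 or 1
    else 2 + ((state >> 1) & 1)             // root points right: victim is way 2 or 3
  }
  // e.g. replaceWay(4) == 2 and replaceWay(6) == 3, matching the asserts above
}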
File BTB.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.subsystem.CacheBlockBytes
import freechips.rocketchip.tile.HasCoreParameters
import freechips.rocketchip.util._
case class BHTParams(
nEntries: Int = 512,
counterLength: Int = 1,
historyLength: Int = 8,
historyBits: Int = 3)
case class BTBParams(
nEntries: Int = 28,
nMatchBits: Int = 14,
nPages: Int = 6,
nRAS: Int = 6,
bhtParams: Option[BHTParams] = Some(BHTParams()),
updatesOutOfOrder: Boolean = false)
trait HasBtbParameters extends HasCoreParameters { this: InstanceId =>
val btbParams = tileParams.btb.getOrElse(BTBParams(nEntries = 0))
val matchBits = btbParams.nMatchBits max log2Ceil(p(CacheBlockBytes) * tileParams.icache.get.nSets)
val entries = btbParams.nEntries
val updatesOutOfOrder = btbParams.updatesOutOfOrder
val nPages = (btbParams.nPages + 1) / 2 * 2 // control logic assumes 2 divides pages
}
abstract class BtbModule(implicit val p: Parameters) extends Module with HasBtbParameters {
Annotated.params(this, btbParams)
}
abstract class BtbBundle(implicit val p: Parameters) extends Bundle with HasBtbParameters
class RAS(nras: Int) {
def push(addr: UInt): Unit = {
when (count < nras.U) { count := count + 1.U }
val nextPos = Mux((isPow2(nras)).B || pos < (nras-1).U, pos+1.U, 0.U)
stack(nextPos) := addr
pos := nextPos
}
def peek: UInt = stack(pos)
def pop(): Unit = when (!isEmpty) {
count := count - 1.U
pos := Mux((isPow2(nras)).B || pos > 0.U, pos-1.U, (nras-1).U)
}
def clear(): Unit = count := 0.U
def isEmpty: Bool = count === 0.U
private val count = RegInit(0.U(log2Up(nras+1).W))
private val pos = RegInit(0.U(log2Up(nras).W))
private val stack = Reg(Vec(nras, UInt()))
}
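// Illustrative sketch (not part of the original file): a pure-Scala model of the
// circular stack-pointer arithmetic used by RAS.push/pop above. For power-of-two
// depths the hardware lets the pointer register wrap naturally; the explicit wrap
// below gives the same result for any depth. The object name is hypothetical.
object RASPointerSketch {
  def inc(pos: Int, nras: Int): Int = if (pos < nras - 1) pos + 1 else 0      // push
  def dec(pos: Int, nras: Int): Int = if (pos > 0) pos - 1 else nras - 1      // pop
}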
class BHTResp(implicit p: Parameters) extends BtbBundle()(p) {
val history = UInt(btbParams.bhtParams.map(_.historyLength).getOrElse(1).W)
val value = UInt(btbParams.bhtParams.map(_.counterLength).getOrElse(1).W)
def taken = value(0)
def strongly_taken = value === 1.U
}
// BHT contains table of 2-bit counters and a global history register.
// The BHT only predicts and updates when there is a BTB hit.
// The global history:
// - updated speculatively in fetch (if there's a BTB hit).
// - on a mispredict, the history register is reset (again, only if BTB hit).
// The counter table:
// - each counter corresponds with the address of the fetch packet ("fetch pc").
// - updated when a branch resolves (and BTB was a hit for that branch).
// The updating branch must provide its "fetch pc".
class BHT(params: BHTParams)(implicit val p: Parameters) extends HasCoreParameters {
def index(addr: UInt, history: UInt) = {
def hashHistory(hist: UInt) = if (params.historyLength == params.historyBits) hist else {
val k = math.sqrt(3)/2
val i = BigDecimal(k * math.pow(2, params.historyLength)).toBigInt
(i.U * hist)(params.historyLength-1, params.historyLength-params.historyBits)
}
def hashAddr(addr: UInt) = {
val hi = addr >> log2Ceil(fetchBytes)
hi(log2Ceil(params.nEntries)-1, 0) ^ (hi >> log2Ceil(params.nEntries))(1, 0)
}
hashAddr(addr) ^ (hashHistory(history) << (log2Up(params.nEntries) - params.historyBits))
}
def get(addr: UInt): BHTResp = {
val res = Wire(new BHTResp)
res.value := Mux(resetting, 0.U, table(index(addr, history)))
res.history := history
res
}
def updateTable(addr: UInt, d: BHTResp, taken: Bool): Unit = {
wen := true.B
when (!resetting) {
waddr := index(addr, d.history)
wdata := (params.counterLength match {
case 1 => taken
case 2 => Cat(taken ^ d.value(0), d.value === 1.U || d.value(1) && taken)
})
}
}
def resetHistory(d: BHTResp): Unit = {
history := d.history
}
def updateHistory(addr: UInt, d: BHTResp, taken: Bool): Unit = {
history := Cat(taken, d.history >> 1)
}
def advanceHistory(taken: Bool): Unit = {
history := Cat(taken, history >> 1)
}
private val table = Mem(params.nEntries, UInt(params.counterLength.W))
val history = RegInit(0.U(params.historyLength.W))
private val reset_waddr = RegInit(0.U((params.nEntries.log2+1).W))
private val resetting = !reset_waddr(params.nEntries.log2)
private val wen = WireInit(resetting)
private val waddr = WireInit(reset_waddr)
private val wdata = WireInit(0.U)
when (resetting) { reset_waddr := reset_waddr + 1.U }
when (wen) { table(waddr) := wdata }
}
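// Illustrative sketch (not part of the original file): a pure-Scala model of the
// counter update performed by BHT.updateTable. With counterLength == 1 the entry is
// simply the last outcome; with counterLength == 2 the new value follows
// Cat(taken ^ value(0), value === 1.U || d.value(1) && taken) as written above.
// Object and method names are hypothetical.
object BHTCounterSketch {
  def update1(taken: Boolean): Int = if (taken) 1 else 0
  def update2(value: Int, taken: Boolean): Int = {
    val hi = (if (taken) 1 else 0) ^ (value & 1)                       // new bit 1
    val lo = if (value == 1 || (((value >> 1) & 1) == 1 && taken)) 1 else 0 // new bit 0
    (hi << 1) | lo
  }
}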
object CFIType {
def SZ = 2
def apply() = UInt(SZ.W)
def branch = 0.U
def jump = 1.U
def call = 2.U
def ret = 3.U
}
// BTB update occurs during branch resolution (and only on a mispredict).
// - "pc" is what future fetch PCs will tag match against.
// - "br_pc" is the PC of the branch instruction.
class BTBUpdate(implicit p: Parameters) extends BtbBundle()(p) {
val prediction = new BTBResp
val pc = UInt(vaddrBits.W)
val target = UInt(vaddrBits.W)
val taken = Bool()
val isValid = Bool()
val br_pc = UInt(vaddrBits.W)
val cfiType = CFIType()
}
// BHT update occurs during branch resolution on all conditional branches.
// - "pc" is what future fetch PCs will tag match against.
class BHTUpdate(implicit p: Parameters) extends BtbBundle()(p) {
val prediction = new BHTResp
val pc = UInt(vaddrBits.W)
val branch = Bool()
val taken = Bool()
val mispredict = Bool()
}
class RASUpdate(implicit p: Parameters) extends BtbBundle()(p) {
val cfiType = CFIType()
val returnAddr = UInt(vaddrBits.W)
}
// - "bridx" is the low-order PC bits of the predicted branch (after
// shifting off the lowest log(inst_bytes) bits off).
// - "mask" provides a mask of valid instructions (instructions are
// masked off by the predicted taken branch from the BTB).
class BTBResp(implicit p: Parameters) extends BtbBundle()(p) {
val cfiType = CFIType()
val taken = Bool()
val mask = Bits(fetchWidth.W)
val bridx = Bits(log2Up(fetchWidth).W)
val target = UInt(vaddrBits.W)
val entry = UInt(log2Up(entries + 1).W)
val bht = new BHTResp
}
class BTBReq(implicit p: Parameters) extends BtbBundle()(p) {
val addr = UInt(vaddrBits.W)
}
// fully-associative branch target buffer
// Higher-performance processors may cause BTB updates to occur out-of-order,
// which requires an extra CAM port for updates (to ensure no duplicates get
// placed in BTB).
class BTB(implicit p: Parameters) extends BtbModule {
val io = IO(new Bundle {
val req = Flipped(Valid(new BTBReq))
val resp = Valid(new BTBResp)
val btb_update = Flipped(Valid(new BTBUpdate))
val bht_update = Flipped(Valid(new BHTUpdate))
val bht_advance = Flipped(Valid(new BTBResp))
val ras_update = Flipped(Valid(new RASUpdate))
val ras_head = Valid(UInt(vaddrBits.W))
val flush = Input(Bool())
})
val idxs = Reg(Vec(entries, UInt((matchBits - log2Up(coreInstBytes)).W)))
val idxPages = Reg(Vec(entries, UInt(log2Up(nPages).W)))
val tgts = Reg(Vec(entries, UInt((matchBits - log2Up(coreInstBytes)).W)))
val tgtPages = Reg(Vec(entries, UInt(log2Up(nPages).W)))
val pages = Reg(Vec(nPages, UInt((vaddrBits - matchBits).W)))
val pageValid = RegInit(0.U(nPages.W))
val pagesMasked = (pageValid.asBools zip pages).map { case (v, p) => Mux(v, p, 0.U) }
val isValid = RegInit(0.U(entries.W))
val cfiType = Reg(Vec(entries, CFIType()))
val brIdx = Reg(Vec(entries, UInt(log2Up(fetchWidth).W)))
private def page(addr: UInt) = addr >> matchBits
private def pageMatch(addr: UInt) = {
val p = page(addr)
pageValid & pages.map(_ === p).asUInt
}
private def idxMatch(addr: UInt) = {
val idx = addr(matchBits-1, log2Up(coreInstBytes))
idxs.map(_ === idx).asUInt & isValid
}
val r_btb_update = Pipe(io.btb_update)
val update_target = io.req.bits.addr
val pageHit = pageMatch(io.req.bits.addr)
val idxHit = idxMatch(io.req.bits.addr)
val updatePageHit = pageMatch(r_btb_update.bits.pc)
val (updateHit, updateHitAddr) =
if (updatesOutOfOrder) {
val updateHits = (pageHit << 1)(Mux1H(idxMatch(r_btb_update.bits.pc), idxPages))
(updateHits.orR, OHToUInt(updateHits))
} else (r_btb_update.bits.prediction.entry < entries.U, r_btb_update.bits.prediction.entry)
val useUpdatePageHit = updatePageHit.orR
val usePageHit = pageHit.orR
val doIdxPageRepl = !useUpdatePageHit
val nextPageRepl = RegInit(0.U(log2Ceil(nPages).W))
val idxPageRepl = Cat(pageHit(nPages-2,0), pageHit(nPages-1)) | Mux(usePageHit, 0.U, UIntToOH(nextPageRepl))
val idxPageUpdateOH = Mux(useUpdatePageHit, updatePageHit, idxPageRepl)
val idxPageUpdate = OHToUInt(idxPageUpdateOH)
val idxPageReplEn = Mux(doIdxPageRepl, idxPageRepl, 0.U)
val samePage = page(r_btb_update.bits.pc) === page(update_target)
val doTgtPageRepl = !samePage && !usePageHit
val tgtPageRepl = Mux(samePage, idxPageUpdateOH, Cat(idxPageUpdateOH(nPages-2,0), idxPageUpdateOH(nPages-1)))
val tgtPageUpdate = OHToUInt(pageHit | Mux(usePageHit, 0.U, tgtPageRepl))
val tgtPageReplEn = Mux(doTgtPageRepl, tgtPageRepl, 0.U)
when (r_btb_update.valid && (doIdxPageRepl || doTgtPageRepl)) {
val both = doIdxPageRepl && doTgtPageRepl
val next = nextPageRepl + Mux[UInt](both, 2.U, 1.U)
nextPageRepl := Mux(next >= nPages.U, next(0), next)
}
val repl = new PseudoLRU(entries)
val waddr = Mux(updateHit, updateHitAddr, repl.way)
val r_resp = Pipe(io.resp)
when (r_resp.valid && r_resp.bits.taken || r_btb_update.valid) {
repl.access(Mux(r_btb_update.valid, waddr, r_resp.bits.entry))
}
when (r_btb_update.valid) {
val mask = UIntToOH(waddr)
idxs(waddr) := r_btb_update.bits.pc(matchBits-1, log2Up(coreInstBytes))
tgts(waddr) := update_target(matchBits-1, log2Up(coreInstBytes))
idxPages(waddr) := idxPageUpdate +& 1.U // the +1 corresponds to the <<1 on io.resp.valid
tgtPages(waddr) := tgtPageUpdate
cfiType(waddr) := r_btb_update.bits.cfiType
isValid := Mux(r_btb_update.bits.isValid, isValid | mask, isValid & ~mask)
if (fetchWidth > 1)
brIdx(waddr) := r_btb_update.bits.br_pc >> log2Up(coreInstBytes)
require(nPages % 2 == 0)
val idxWritesEven = !idxPageUpdate(0)
def writeBank(i: Int, mod: Int, en: UInt, data: UInt) =
for (i <- i until nPages by mod)
when (en(i)) { pages(i) := data }
writeBank(0, 2, Mux(idxWritesEven, idxPageReplEn, tgtPageReplEn),
Mux(idxWritesEven, page(r_btb_update.bits.pc), page(update_target)))
writeBank(1, 2, Mux(idxWritesEven, tgtPageReplEn, idxPageReplEn),
Mux(idxWritesEven, page(update_target), page(r_btb_update.bits.pc)))
pageValid := pageValid | tgtPageReplEn | idxPageReplEn
}
io.resp.valid := (pageHit << 1)(Mux1H(idxHit, idxPages))
io.resp.bits.taken := true.B
io.resp.bits.target := Cat(pagesMasked(Mux1H(idxHit, tgtPages)), Mux1H(idxHit, tgts) << log2Up(coreInstBytes))
io.resp.bits.entry := OHToUInt(idxHit)
io.resp.bits.bridx := (if (fetchWidth > 1) Mux1H(idxHit, brIdx) else 0.U)
io.resp.bits.mask := Cat((1.U << ~Mux(io.resp.bits.taken, ~io.resp.bits.bridx, 0.U))-1.U, 1.U)
io.resp.bits.cfiType := Mux1H(idxHit, cfiType)
// if multiple entries for same PC land in BTB, zap them
when (PopCountAtLeast(idxHit, 2)) {
isValid := isValid & ~idxHit
}
when (io.flush) {
isValid := 0.U
}
if (btbParams.bhtParams.nonEmpty) {
val bht = new BHT(Annotated.params(this, btbParams.bhtParams.get))
val isBranch = (idxHit & cfiType.map(_ === CFIType.branch).asUInt).orR
val res = bht.get(io.req.bits.addr)
when (io.bht_advance.valid) {
bht.advanceHistory(io.bht_advance.bits.bht.taken)
}
when (io.bht_update.valid) {
when (io.bht_update.bits.branch) {
bht.updateTable(io.bht_update.bits.pc, io.bht_update.bits.prediction, io.bht_update.bits.taken)
when (io.bht_update.bits.mispredict) {
bht.updateHistory(io.bht_update.bits.pc, io.bht_update.bits.prediction, io.bht_update.bits.taken)
}
}.elsewhen (io.bht_update.bits.mispredict) {
bht.resetHistory(io.bht_update.bits.prediction)
}
}
when (!res.taken && isBranch) { io.resp.bits.taken := false.B }
io.resp.bits.bht := res
}
if (btbParams.nRAS > 0) {
val ras = new RAS(btbParams.nRAS)
val doPeek = (idxHit & cfiType.map(_ === CFIType.ret).asUInt).orR
io.ras_head.valid := !ras.isEmpty
io.ras_head.bits := ras.peek
when (!ras.isEmpty && doPeek) {
io.resp.bits.target := ras.peek
}
when (io.ras_update.valid) {
when (io.ras_update.bits.cfiType === CFIType.call) {
ras.push(io.ras_update.bits.returnAddr)
}.elsewhen (io.ras_update.bits.cfiType === CFIType.ret) {
ras.pop()
}
}
}
}
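// Illustrative sketch (not part of the original file): how a request address is split
// into the fields compared by pageMatch and idxMatch above, assuming vaddrBits = 39,
// matchBits = 14 and coreInstBytes = 2 as in the generated BTB_6 module below
// (which slices addr[38:14] for the page tag and addr[13:1] for the entry index).
// Object and method names are hypothetical.
object BTBAddrSplitSketch {
  def pageTag(addr: Long): Long = addr >>> 14             // 25-bit tag matched against the page CAM
  def entryIdx(addr: Long): Long = (addr >>> 1) & 0x1FFF  // 13-bit index matched against idxs()
}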
| module BTB_6( // @[BTB.scala:187:7]
input clock, // @[BTB.scala:187:7]
input reset, // @[BTB.scala:187:7]
input io_req_valid, // @[BTB.scala:188:14]
input [38:0] io_req_bits_addr, // @[BTB.scala:188:14]
output io_resp_valid, // @[BTB.scala:188:14]
output [1:0] io_resp_bits_cfiType, // @[BTB.scala:188:14]
output io_resp_bits_taken, // @[BTB.scala:188:14]
output [1:0] io_resp_bits_mask, // @[BTB.scala:188:14]
output io_resp_bits_bridx, // @[BTB.scala:188:14]
output [38:0] io_resp_bits_target, // @[BTB.scala:188:14]
output [4:0] io_resp_bits_entry, // @[BTB.scala:188:14]
output [7:0] io_resp_bits_bht_history, // @[BTB.scala:188:14]
output io_resp_bits_bht_value, // @[BTB.scala:188:14]
input io_btb_update_valid, // @[BTB.scala:188:14]
input [1:0] io_btb_update_bits_prediction_cfiType, // @[BTB.scala:188:14]
input io_btb_update_bits_prediction_taken, // @[BTB.scala:188:14]
input [1:0] io_btb_update_bits_prediction_mask, // @[BTB.scala:188:14]
input io_btb_update_bits_prediction_bridx, // @[BTB.scala:188:14]
input [38:0] io_btb_update_bits_prediction_target, // @[BTB.scala:188:14]
input [4:0] io_btb_update_bits_prediction_entry, // @[BTB.scala:188:14]
input [7:0] io_btb_update_bits_prediction_bht_history, // @[BTB.scala:188:14]
input io_btb_update_bits_prediction_bht_value, // @[BTB.scala:188:14]
input [38:0] io_btb_update_bits_pc, // @[BTB.scala:188:14]
input [38:0] io_btb_update_bits_target, // @[BTB.scala:188:14]
input io_btb_update_bits_isValid, // @[BTB.scala:188:14]
input [38:0] io_btb_update_bits_br_pc, // @[BTB.scala:188:14]
input [1:0] io_btb_update_bits_cfiType, // @[BTB.scala:188:14]
input io_bht_update_valid, // @[BTB.scala:188:14]
input [7:0] io_bht_update_bits_prediction_history, // @[BTB.scala:188:14]
input io_bht_update_bits_prediction_value, // @[BTB.scala:188:14]
input [38:0] io_bht_update_bits_pc, // @[BTB.scala:188:14]
input io_bht_update_bits_branch, // @[BTB.scala:188:14]
input io_bht_update_bits_taken, // @[BTB.scala:188:14]
input io_bht_update_bits_mispredict, // @[BTB.scala:188:14]
input io_bht_advance_valid, // @[BTB.scala:188:14]
input [1:0] io_bht_advance_bits_cfiType, // @[BTB.scala:188:14]
input io_bht_advance_bits_taken, // @[BTB.scala:188:14]
input [1:0] io_bht_advance_bits_mask, // @[BTB.scala:188:14]
input io_bht_advance_bits_bridx, // @[BTB.scala:188:14]
input [38:0] io_bht_advance_bits_target, // @[BTB.scala:188:14]
input [4:0] io_bht_advance_bits_entry, // @[BTB.scala:188:14]
input [7:0] io_bht_advance_bits_bht_history, // @[BTB.scala:188:14]
input io_bht_advance_bits_bht_value, // @[BTB.scala:188:14]
input io_ras_update_valid, // @[BTB.scala:188:14]
input [1:0] io_ras_update_bits_cfiType, // @[BTB.scala:188:14]
input [38:0] io_ras_update_bits_returnAddr, // @[BTB.scala:188:14]
output io_ras_head_valid, // @[BTB.scala:188:14]
output [38:0] io_ras_head_bits, // @[BTB.scala:188:14]
input io_flush // @[BTB.scala:188:14]
);
wire _table_ext_R0_data; // @[BTB.scala:116:26]
wire io_req_valid_0 = io_req_valid; // @[BTB.scala:187:7]
wire [38:0] io_req_bits_addr_0 = io_req_bits_addr; // @[BTB.scala:187:7]
wire io_btb_update_valid_0 = io_btb_update_valid; // @[BTB.scala:187:7]
wire [1:0] io_btb_update_bits_prediction_cfiType_0 = io_btb_update_bits_prediction_cfiType; // @[BTB.scala:187:7]
wire io_btb_update_bits_prediction_taken_0 = io_btb_update_bits_prediction_taken; // @[BTB.scala:187:7]
wire [1:0] io_btb_update_bits_prediction_mask_0 = io_btb_update_bits_prediction_mask; // @[BTB.scala:187:7]
wire io_btb_update_bits_prediction_bridx_0 = io_btb_update_bits_prediction_bridx; // @[BTB.scala:187:7]
wire [38:0] io_btb_update_bits_prediction_target_0 = io_btb_update_bits_prediction_target; // @[BTB.scala:187:7]
wire [4:0] io_btb_update_bits_prediction_entry_0 = io_btb_update_bits_prediction_entry; // @[BTB.scala:187:7]
wire [7:0] io_btb_update_bits_prediction_bht_history_0 = io_btb_update_bits_prediction_bht_history; // @[BTB.scala:187:7]
wire io_btb_update_bits_prediction_bht_value_0 = io_btb_update_bits_prediction_bht_value; // @[BTB.scala:187:7]
wire [38:0] io_btb_update_bits_pc_0 = io_btb_update_bits_pc; // @[BTB.scala:187:7]
wire [38:0] io_btb_update_bits_target_0 = io_btb_update_bits_target; // @[BTB.scala:187:7]
wire io_btb_update_bits_isValid_0 = io_btb_update_bits_isValid; // @[BTB.scala:187:7]
wire [38:0] io_btb_update_bits_br_pc_0 = io_btb_update_bits_br_pc; // @[BTB.scala:187:7]
wire [1:0] io_btb_update_bits_cfiType_0 = io_btb_update_bits_cfiType; // @[BTB.scala:187:7]
wire io_bht_update_valid_0 = io_bht_update_valid; // @[BTB.scala:187:7]
wire [7:0] io_bht_update_bits_prediction_history_0 = io_bht_update_bits_prediction_history; // @[BTB.scala:187:7]
wire io_bht_update_bits_prediction_value_0 = io_bht_update_bits_prediction_value; // @[BTB.scala:187:7]
wire [38:0] io_bht_update_bits_pc_0 = io_bht_update_bits_pc; // @[BTB.scala:187:7]
wire io_bht_update_bits_branch_0 = io_bht_update_bits_branch; // @[BTB.scala:187:7]
wire io_bht_update_bits_taken_0 = io_bht_update_bits_taken; // @[BTB.scala:187:7]
wire io_bht_update_bits_mispredict_0 = io_bht_update_bits_mispredict; // @[BTB.scala:187:7]
wire io_bht_advance_valid_0 = io_bht_advance_valid; // @[BTB.scala:187:7]
wire [1:0] io_bht_advance_bits_cfiType_0 = io_bht_advance_bits_cfiType; // @[BTB.scala:187:7]
wire io_bht_advance_bits_taken_0 = io_bht_advance_bits_taken; // @[BTB.scala:187:7]
wire [1:0] io_bht_advance_bits_mask_0 = io_bht_advance_bits_mask; // @[BTB.scala:187:7]
wire io_bht_advance_bits_bridx_0 = io_bht_advance_bits_bridx; // @[BTB.scala:187:7]
wire [38:0] io_bht_advance_bits_target_0 = io_bht_advance_bits_target; // @[BTB.scala:187:7]
wire [4:0] io_bht_advance_bits_entry_0 = io_bht_advance_bits_entry; // @[BTB.scala:187:7]
wire [7:0] io_bht_advance_bits_bht_history_0 = io_bht_advance_bits_bht_history; // @[BTB.scala:187:7]
wire io_bht_advance_bits_bht_value_0 = io_bht_advance_bits_bht_value; // @[BTB.scala:187:7]
wire io_ras_update_valid_0 = io_ras_update_valid; // @[BTB.scala:187:7]
wire [1:0] io_ras_update_bits_cfiType_0 = io_ras_update_bits_cfiType; // @[BTB.scala:187:7]
wire [38:0] io_ras_update_bits_returnAddr_0 = io_ras_update_bits_returnAddr; // @[BTB.scala:187:7]
wire io_flush_0 = io_flush; // @[BTB.scala:187:7]
wire io_btb_update_bits_taken = 1'h0; // @[BTB.scala:187:7]
wire r_btb_update_bits_taken = 1'h0; // @[Valid.scala:135:21]
wire _io_resp_valid_T_85; // @[BTB.scala:287:34]
wire [1:0] _io_resp_bits_cfiType_WIRE; // @[Mux.scala:30:73]
wire _io_resp_bits_bridx_WIRE; // @[Mux.scala:30:73]
wire [4:0] _io_resp_bits_entry_T_12; // @[OneHot.scala:32:10]
wire [7:0] res_history; // @[BTB.scala:91:19]
wire res_value; // @[BTB.scala:91:19]
wire _io_ras_head_valid_T_1; // @[BTB.scala:327:26]
wire [7:0] io_resp_bits_bht_history_0; // @[BTB.scala:187:7]
wire io_resp_bits_bht_value_0; // @[BTB.scala:187:7]
wire [1:0] io_resp_bits_cfiType_0; // @[BTB.scala:187:7]
wire io_resp_bits_taken_0; // @[BTB.scala:187:7]
wire [1:0] io_resp_bits_mask_0; // @[BTB.scala:187:7]
wire io_resp_bits_bridx_0; // @[BTB.scala:187:7]
wire [38:0] io_resp_bits_target_0; // @[BTB.scala:187:7]
wire [4:0] io_resp_bits_entry_0; // @[BTB.scala:187:7]
wire io_resp_valid_0; // @[BTB.scala:187:7]
wire io_ras_head_valid_0; // @[BTB.scala:187:7]
wire [38:0] io_ras_head_bits_0; // @[BTB.scala:187:7]
reg [12:0] idxs_0; // @[BTB.scala:199:17]
reg [12:0] idxs_1; // @[BTB.scala:199:17]
reg [12:0] idxs_2; // @[BTB.scala:199:17]
reg [12:0] idxs_3; // @[BTB.scala:199:17]
reg [12:0] idxs_4; // @[BTB.scala:199:17]
reg [12:0] idxs_5; // @[BTB.scala:199:17]
reg [12:0] idxs_6; // @[BTB.scala:199:17]
reg [12:0] idxs_7; // @[BTB.scala:199:17]
reg [12:0] idxs_8; // @[BTB.scala:199:17]
reg [12:0] idxs_9; // @[BTB.scala:199:17]
reg [12:0] idxs_10; // @[BTB.scala:199:17]
reg [12:0] idxs_11; // @[BTB.scala:199:17]
reg [12:0] idxs_12; // @[BTB.scala:199:17]
reg [12:0] idxs_13; // @[BTB.scala:199:17]
reg [12:0] idxs_14; // @[BTB.scala:199:17]
reg [12:0] idxs_15; // @[BTB.scala:199:17]
reg [12:0] idxs_16; // @[BTB.scala:199:17]
reg [12:0] idxs_17; // @[BTB.scala:199:17]
reg [12:0] idxs_18; // @[BTB.scala:199:17]
reg [12:0] idxs_19; // @[BTB.scala:199:17]
reg [12:0] idxs_20; // @[BTB.scala:199:17]
reg [12:0] idxs_21; // @[BTB.scala:199:17]
reg [12:0] idxs_22; // @[BTB.scala:199:17]
reg [12:0] idxs_23; // @[BTB.scala:199:17]
reg [12:0] idxs_24; // @[BTB.scala:199:17]
reg [12:0] idxs_25; // @[BTB.scala:199:17]
reg [12:0] idxs_26; // @[BTB.scala:199:17]
reg [12:0] idxs_27; // @[BTB.scala:199:17]
reg [2:0] idxPages_0; // @[BTB.scala:200:21]
reg [2:0] idxPages_1; // @[BTB.scala:200:21]
reg [2:0] idxPages_2; // @[BTB.scala:200:21]
reg [2:0] idxPages_3; // @[BTB.scala:200:21]
reg [2:0] idxPages_4; // @[BTB.scala:200:21]
reg [2:0] idxPages_5; // @[BTB.scala:200:21]
reg [2:0] idxPages_6; // @[BTB.scala:200:21]
reg [2:0] idxPages_7; // @[BTB.scala:200:21]
reg [2:0] idxPages_8; // @[BTB.scala:200:21]
reg [2:0] idxPages_9; // @[BTB.scala:200:21]
reg [2:0] idxPages_10; // @[BTB.scala:200:21]
reg [2:0] idxPages_11; // @[BTB.scala:200:21]
reg [2:0] idxPages_12; // @[BTB.scala:200:21]
reg [2:0] idxPages_13; // @[BTB.scala:200:21]
reg [2:0] idxPages_14; // @[BTB.scala:200:21]
reg [2:0] idxPages_15; // @[BTB.scala:200:21]
reg [2:0] idxPages_16; // @[BTB.scala:200:21]
reg [2:0] idxPages_17; // @[BTB.scala:200:21]
reg [2:0] idxPages_18; // @[BTB.scala:200:21]
reg [2:0] idxPages_19; // @[BTB.scala:200:21]
reg [2:0] idxPages_20; // @[BTB.scala:200:21]
reg [2:0] idxPages_21; // @[BTB.scala:200:21]
reg [2:0] idxPages_22; // @[BTB.scala:200:21]
reg [2:0] idxPages_23; // @[BTB.scala:200:21]
reg [2:0] idxPages_24; // @[BTB.scala:200:21]
reg [2:0] idxPages_25; // @[BTB.scala:200:21]
reg [2:0] idxPages_26; // @[BTB.scala:200:21]
reg [2:0] idxPages_27; // @[BTB.scala:200:21]
reg [12:0] tgts_0; // @[BTB.scala:201:17]
reg [12:0] tgts_1; // @[BTB.scala:201:17]
reg [12:0] tgts_2; // @[BTB.scala:201:17]
reg [12:0] tgts_3; // @[BTB.scala:201:17]
reg [12:0] tgts_4; // @[BTB.scala:201:17]
reg [12:0] tgts_5; // @[BTB.scala:201:17]
reg [12:0] tgts_6; // @[BTB.scala:201:17]
reg [12:0] tgts_7; // @[BTB.scala:201:17]
reg [12:0] tgts_8; // @[BTB.scala:201:17]
reg [12:0] tgts_9; // @[BTB.scala:201:17]
reg [12:0] tgts_10; // @[BTB.scala:201:17]
reg [12:0] tgts_11; // @[BTB.scala:201:17]
reg [12:0] tgts_12; // @[BTB.scala:201:17]
reg [12:0] tgts_13; // @[BTB.scala:201:17]
reg [12:0] tgts_14; // @[BTB.scala:201:17]
reg [12:0] tgts_15; // @[BTB.scala:201:17]
reg [12:0] tgts_16; // @[BTB.scala:201:17]
reg [12:0] tgts_17; // @[BTB.scala:201:17]
reg [12:0] tgts_18; // @[BTB.scala:201:17]
reg [12:0] tgts_19; // @[BTB.scala:201:17]
reg [12:0] tgts_20; // @[BTB.scala:201:17]
reg [12:0] tgts_21; // @[BTB.scala:201:17]
reg [12:0] tgts_22; // @[BTB.scala:201:17]
reg [12:0] tgts_23; // @[BTB.scala:201:17]
reg [12:0] tgts_24; // @[BTB.scala:201:17]
reg [12:0] tgts_25; // @[BTB.scala:201:17]
reg [12:0] tgts_26; // @[BTB.scala:201:17]
reg [12:0] tgts_27; // @[BTB.scala:201:17]
reg [2:0] tgtPages_0; // @[BTB.scala:202:21]
reg [2:0] tgtPages_1; // @[BTB.scala:202:21]
reg [2:0] tgtPages_2; // @[BTB.scala:202:21]
reg [2:0] tgtPages_3; // @[BTB.scala:202:21]
reg [2:0] tgtPages_4; // @[BTB.scala:202:21]
reg [2:0] tgtPages_5; // @[BTB.scala:202:21]
reg [2:0] tgtPages_6; // @[BTB.scala:202:21]
reg [2:0] tgtPages_7; // @[BTB.scala:202:21]
reg [2:0] tgtPages_8; // @[BTB.scala:202:21]
reg [2:0] tgtPages_9; // @[BTB.scala:202:21]
reg [2:0] tgtPages_10; // @[BTB.scala:202:21]
reg [2:0] tgtPages_11; // @[BTB.scala:202:21]
reg [2:0] tgtPages_12; // @[BTB.scala:202:21]
reg [2:0] tgtPages_13; // @[BTB.scala:202:21]
reg [2:0] tgtPages_14; // @[BTB.scala:202:21]
reg [2:0] tgtPages_15; // @[BTB.scala:202:21]
reg [2:0] tgtPages_16; // @[BTB.scala:202:21]
reg [2:0] tgtPages_17; // @[BTB.scala:202:21]
reg [2:0] tgtPages_18; // @[BTB.scala:202:21]
reg [2:0] tgtPages_19; // @[BTB.scala:202:21]
reg [2:0] tgtPages_20; // @[BTB.scala:202:21]
reg [2:0] tgtPages_21; // @[BTB.scala:202:21]
reg [2:0] tgtPages_22; // @[BTB.scala:202:21]
reg [2:0] tgtPages_23; // @[BTB.scala:202:21]
reg [2:0] tgtPages_24; // @[BTB.scala:202:21]
reg [2:0] tgtPages_25; // @[BTB.scala:202:21]
reg [2:0] tgtPages_26; // @[BTB.scala:202:21]
reg [2:0] tgtPages_27; // @[BTB.scala:202:21]
reg [24:0] pages_0; // @[BTB.scala:203:18]
reg [24:0] pages_1; // @[BTB.scala:203:18]
reg [24:0] pages_2; // @[BTB.scala:203:18]
reg [24:0] pages_3; // @[BTB.scala:203:18]
reg [24:0] pages_4; // @[BTB.scala:203:18]
reg [24:0] pages_5; // @[BTB.scala:203:18]
reg [5:0] pageValid; // @[BTB.scala:204:26]
wire _pagesMasked_T = pageValid[0]; // @[BTB.scala:204:26, :205:32]
wire _pagesMasked_T_1 = pageValid[1]; // @[BTB.scala:204:26, :205:32]
wire _pagesMasked_T_2 = pageValid[2]; // @[BTB.scala:204:26, :205:32]
wire _pagesMasked_T_3 = pageValid[3]; // @[BTB.scala:204:26, :205:32]
wire _pagesMasked_T_4 = pageValid[4]; // @[BTB.scala:204:26, :205:32]
wire _pagesMasked_T_5 = pageValid[5]; // @[BTB.scala:204:26, :205:32]
wire [24:0] pagesMasked_0 = _pagesMasked_T ? pages_0 : 25'h0; // @[BTB.scala:203:18, :205:{32,75}]
wire [24:0] pagesMasked_1 = _pagesMasked_T_1 ? pages_1 : 25'h0; // @[BTB.scala:203:18, :205:{32,75}]
wire [24:0] pagesMasked_2 = _pagesMasked_T_2 ? pages_2 : 25'h0; // @[BTB.scala:203:18, :205:{32,75}]
wire [24:0] pagesMasked_3 = _pagesMasked_T_3 ? pages_3 : 25'h0; // @[BTB.scala:203:18, :205:{32,75}]
wire [24:0] pagesMasked_4 = _pagesMasked_T_4 ? pages_4 : 25'h0; // @[BTB.scala:203:18, :205:{32,75}]
wire [24:0] pagesMasked_5 = _pagesMasked_T_5 ? pages_5 : 25'h0; // @[BTB.scala:203:18, :205:{32,75}]
reg [27:0] isValid; // @[BTB.scala:207:24]
reg [1:0] cfiType_0; // @[BTB.scala:208:20]
reg [1:0] cfiType_1; // @[BTB.scala:208:20]
reg [1:0] cfiType_2; // @[BTB.scala:208:20]
reg [1:0] cfiType_3; // @[BTB.scala:208:20]
reg [1:0] cfiType_4; // @[BTB.scala:208:20]
reg [1:0] cfiType_5; // @[BTB.scala:208:20]
reg [1:0] cfiType_6; // @[BTB.scala:208:20]
reg [1:0] cfiType_7; // @[BTB.scala:208:20]
reg [1:0] cfiType_8; // @[BTB.scala:208:20]
reg [1:0] cfiType_9; // @[BTB.scala:208:20]
reg [1:0] cfiType_10; // @[BTB.scala:208:20]
reg [1:0] cfiType_11; // @[BTB.scala:208:20]
reg [1:0] cfiType_12; // @[BTB.scala:208:20]
reg [1:0] cfiType_13; // @[BTB.scala:208:20]
reg [1:0] cfiType_14; // @[BTB.scala:208:20]
reg [1:0] cfiType_15; // @[BTB.scala:208:20]
reg [1:0] cfiType_16; // @[BTB.scala:208:20]
reg [1:0] cfiType_17; // @[BTB.scala:208:20]
reg [1:0] cfiType_18; // @[BTB.scala:208:20]
reg [1:0] cfiType_19; // @[BTB.scala:208:20]
reg [1:0] cfiType_20; // @[BTB.scala:208:20]
reg [1:0] cfiType_21; // @[BTB.scala:208:20]
reg [1:0] cfiType_22; // @[BTB.scala:208:20]
reg [1:0] cfiType_23; // @[BTB.scala:208:20]
reg [1:0] cfiType_24; // @[BTB.scala:208:20]
reg [1:0] cfiType_25; // @[BTB.scala:208:20]
reg [1:0] cfiType_26; // @[BTB.scala:208:20]
reg [1:0] cfiType_27; // @[BTB.scala:208:20]
reg brIdx_0; // @[BTB.scala:209:18]
reg brIdx_1; // @[BTB.scala:209:18]
reg brIdx_2; // @[BTB.scala:209:18]
reg brIdx_3; // @[BTB.scala:209:18]
reg brIdx_4; // @[BTB.scala:209:18]
reg brIdx_5; // @[BTB.scala:209:18]
reg brIdx_6; // @[BTB.scala:209:18]
reg brIdx_7; // @[BTB.scala:209:18]
reg brIdx_8; // @[BTB.scala:209:18]
reg brIdx_9; // @[BTB.scala:209:18]
reg brIdx_10; // @[BTB.scala:209:18]
reg brIdx_11; // @[BTB.scala:209:18]
reg brIdx_12; // @[BTB.scala:209:18]
reg brIdx_13; // @[BTB.scala:209:18]
reg brIdx_14; // @[BTB.scala:209:18]
reg brIdx_15; // @[BTB.scala:209:18]
reg brIdx_16; // @[BTB.scala:209:18]
reg brIdx_17; // @[BTB.scala:209:18]
reg brIdx_18; // @[BTB.scala:209:18]
reg brIdx_19; // @[BTB.scala:209:18]
reg brIdx_20; // @[BTB.scala:209:18]
reg brIdx_21; // @[BTB.scala:209:18]
reg brIdx_22; // @[BTB.scala:209:18]
reg brIdx_23; // @[BTB.scala:209:18]
reg brIdx_24; // @[BTB.scala:209:18]
reg brIdx_25; // @[BTB.scala:209:18]
reg brIdx_26; // @[BTB.scala:209:18]
reg brIdx_27; // @[BTB.scala:209:18]
reg r_btb_update_pipe_v; // @[Valid.scala:141:24]
wire r_btb_update_valid = r_btb_update_pipe_v; // @[Valid.scala:135:21, :141:24]
reg [1:0] r_btb_update_pipe_b_prediction_cfiType; // @[Valid.scala:142:26]
wire [1:0] r_btb_update_bits_prediction_cfiType = r_btb_update_pipe_b_prediction_cfiType; // @[Valid.scala:135:21, :142:26]
reg r_btb_update_pipe_b_prediction_taken; // @[Valid.scala:142:26]
wire r_btb_update_bits_prediction_taken = r_btb_update_pipe_b_prediction_taken; // @[Valid.scala:135:21, :142:26]
reg [1:0] r_btb_update_pipe_b_prediction_mask; // @[Valid.scala:142:26]
wire [1:0] r_btb_update_bits_prediction_mask = r_btb_update_pipe_b_prediction_mask; // @[Valid.scala:135:21, :142:26]
reg r_btb_update_pipe_b_prediction_bridx; // @[Valid.scala:142:26]
wire r_btb_update_bits_prediction_bridx = r_btb_update_pipe_b_prediction_bridx; // @[Valid.scala:135:21, :142:26]
reg [38:0] r_btb_update_pipe_b_prediction_target; // @[Valid.scala:142:26]
wire [38:0] r_btb_update_bits_prediction_target = r_btb_update_pipe_b_prediction_target; // @[Valid.scala:135:21, :142:26]
reg [4:0] r_btb_update_pipe_b_prediction_entry; // @[Valid.scala:142:26]
wire [4:0] r_btb_update_bits_prediction_entry = r_btb_update_pipe_b_prediction_entry; // @[Valid.scala:135:21, :142:26]
reg [7:0] r_btb_update_pipe_b_prediction_bht_history; // @[Valid.scala:142:26]
wire [7:0] r_btb_update_bits_prediction_bht_history = r_btb_update_pipe_b_prediction_bht_history; // @[Valid.scala:135:21, :142:26]
reg r_btb_update_pipe_b_prediction_bht_value; // @[Valid.scala:142:26]
wire r_btb_update_bits_prediction_bht_value = r_btb_update_pipe_b_prediction_bht_value; // @[Valid.scala:135:21, :142:26]
reg [38:0] r_btb_update_pipe_b_pc; // @[Valid.scala:142:26]
wire [38:0] r_btb_update_bits_pc = r_btb_update_pipe_b_pc; // @[Valid.scala:135:21, :142:26]
reg [38:0] r_btb_update_pipe_b_target; // @[Valid.scala:142:26]
wire [38:0] r_btb_update_bits_target = r_btb_update_pipe_b_target; // @[Valid.scala:135:21, :142:26]
reg r_btb_update_pipe_b_isValid; // @[Valid.scala:142:26]
wire r_btb_update_bits_isValid = r_btb_update_pipe_b_isValid; // @[Valid.scala:135:21, :142:26]
reg [38:0] r_btb_update_pipe_b_br_pc; // @[Valid.scala:142:26]
wire [38:0] r_btb_update_bits_br_pc = r_btb_update_pipe_b_br_pc; // @[Valid.scala:135:21, :142:26]
reg [1:0] r_btb_update_pipe_b_cfiType; // @[Valid.scala:142:26]
wire [1:0] r_btb_update_bits_cfiType = r_btb_update_pipe_b_cfiType; // @[Valid.scala:135:21, :142:26]
wire [24:0] pageHit_p = io_req_bits_addr_0[38:14]; // @[BTB.scala:187:7, :211:39]
wire [24:0] _samePage_T_1 = io_req_bits_addr_0[38:14]; // @[BTB.scala:187:7, :211:39]
wire _pageHit_T = pages_0 == pageHit_p; // @[BTB.scala:203:18, :211:39, :214:29]
wire _pageHit_T_1 = pages_1 == pageHit_p; // @[BTB.scala:203:18, :211:39, :214:29]
wire _pageHit_T_2 = pages_2 == pageHit_p; // @[BTB.scala:203:18, :211:39, :214:29]
wire _pageHit_T_3 = pages_3 == pageHit_p; // @[BTB.scala:203:18, :211:39, :214:29]
wire _pageHit_T_4 = pages_4 == pageHit_p; // @[BTB.scala:203:18, :211:39, :214:29]
wire _pageHit_T_5 = pages_5 == pageHit_p; // @[BTB.scala:203:18, :211:39, :214:29]
wire [1:0] pageHit_lo_hi = {_pageHit_T_2, _pageHit_T_1}; // @[package.scala:45:27]
wire [2:0] pageHit_lo = {pageHit_lo_hi, _pageHit_T}; // @[package.scala:45:27]
wire [1:0] pageHit_hi_hi = {_pageHit_T_5, _pageHit_T_4}; // @[package.scala:45:27]
wire [2:0] pageHit_hi = {pageHit_hi_hi, _pageHit_T_3}; // @[package.scala:45:27]
wire [5:0] _pageHit_T_6 = {pageHit_hi, pageHit_lo}; // @[package.scala:45:27]
wire [5:0] pageHit = pageValid & _pageHit_T_6; // @[package.scala:45:27]
wire [12:0] idxHit_idx = io_req_bits_addr_0[13:1]; // @[BTB.scala:187:7, :217:19]
wire [12:0] _tgts_T = io_req_bits_addr_0[13:1]; // @[BTB.scala:187:7, :217:19, :265:33]
wire _idxHit_T = idxs_0 == idxHit_idx; // @[BTB.scala:199:17, :217:19, :218:16]
wire _idxHit_T_1 = idxs_1 == idxHit_idx; // @[BTB.scala:199:17, :217:19, :218:16]
wire _idxHit_T_2 = idxs_2 == idxHit_idx; // @[BTB.scala:199:17, :217:19, :218:16]
wire _idxHit_T_3 = idxs_3 == idxHit_idx; // @[BTB.scala:199:17, :217:19, :218:16]
wire _idxHit_T_4 = idxs_4 == idxHit_idx; // @[BTB.scala:199:17, :217:19, :218:16]
wire _idxHit_T_5 = idxs_5 == idxHit_idx; // @[BTB.scala:199:17, :217:19, :218:16]
wire _idxHit_T_6 = idxs_6 == idxHit_idx; // @[BTB.scala:199:17, :217:19, :218:16]
wire _idxHit_T_7 = idxs_7 == idxHit_idx; // @[BTB.scala:199:17, :217:19, :218:16]
wire _idxHit_T_8 = idxs_8 == idxHit_idx; // @[BTB.scala:199:17, :217:19, :218:16]
wire _idxHit_T_9 = idxs_9 == idxHit_idx; // @[BTB.scala:199:17, :217:19, :218:16]
wire _idxHit_T_10 = idxs_10 == idxHit_idx; // @[BTB.scala:199:17, :217:19, :218:16]
wire _idxHit_T_11 = idxs_11 == idxHit_idx; // @[BTB.scala:199:17, :217:19, :218:16]
wire _idxHit_T_12 = idxs_12 == idxHit_idx; // @[BTB.scala:199:17, :217:19, :218:16]
wire _idxHit_T_13 = idxs_13 == idxHit_idx; // @[BTB.scala:199:17, :217:19, :218:16]
wire _idxHit_T_14 = idxs_14 == idxHit_idx; // @[BTB.scala:199:17, :217:19, :218:16]
wire _idxHit_T_15 = idxs_15 == idxHit_idx; // @[BTB.scala:199:17, :217:19, :218:16]
wire _idxHit_T_16 = idxs_16 == idxHit_idx; // @[BTB.scala:199:17, :217:19, :218:16]
wire _idxHit_T_17 = idxs_17 == idxHit_idx; // @[BTB.scala:199:17, :217:19, :218:16]
wire _idxHit_T_18 = idxs_18 == idxHit_idx; // @[BTB.scala:199:17, :217:19, :218:16]
wire _idxHit_T_19 = idxs_19 == idxHit_idx; // @[BTB.scala:199:17, :217:19, :218:16]
wire _idxHit_T_20 = idxs_20 == idxHit_idx; // @[BTB.scala:199:17, :217:19, :218:16]
wire _idxHit_T_21 = idxs_21 == idxHit_idx; // @[BTB.scala:199:17, :217:19, :218:16]
wire _idxHit_T_22 = idxs_22 == idxHit_idx; // @[BTB.scala:199:17, :217:19, :218:16]
wire _idxHit_T_23 = idxs_23 == idxHit_idx; // @[BTB.scala:199:17, :217:19, :218:16]
wire _idxHit_T_24 = idxs_24 == idxHit_idx; // @[BTB.scala:199:17, :217:19, :218:16]
wire _idxHit_T_25 = idxs_25 == idxHit_idx; // @[BTB.scala:199:17, :217:19, :218:16]
wire _idxHit_T_26 = idxs_26 == idxHit_idx; // @[BTB.scala:199:17, :217:19, :218:16]
wire _idxHit_T_27 = idxs_27 == idxHit_idx; // @[BTB.scala:199:17, :217:19, :218:16]
wire [1:0] idxHit_lo_lo_lo_hi = {_idxHit_T_2, _idxHit_T_1}; // @[package.scala:45:27]
wire [2:0] idxHit_lo_lo_lo = {idxHit_lo_lo_lo_hi, _idxHit_T}; // @[package.scala:45:27]
wire [1:0] idxHit_lo_lo_hi_lo = {_idxHit_T_4, _idxHit_T_3}; // @[package.scala:45:27]
wire [1:0] idxHit_lo_lo_hi_hi = {_idxHit_T_6, _idxHit_T_5}; // @[package.scala:45:27]
wire [3:0] idxHit_lo_lo_hi = {idxHit_lo_lo_hi_hi, idxHit_lo_lo_hi_lo}; // @[package.scala:45:27]
wire [6:0] idxHit_lo_lo = {idxHit_lo_lo_hi, idxHit_lo_lo_lo}; // @[package.scala:45:27]
wire [1:0] idxHit_lo_hi_lo_hi = {_idxHit_T_9, _idxHit_T_8}; // @[package.scala:45:27]
wire [2:0] idxHit_lo_hi_lo = {idxHit_lo_hi_lo_hi, _idxHit_T_7}; // @[package.scala:45:27]
wire [1:0] idxHit_lo_hi_hi_lo = {_idxHit_T_11, _idxHit_T_10}; // @[package.scala:45:27]
wire [1:0] idxHit_lo_hi_hi_hi = {_idxHit_T_13, _idxHit_T_12}; // @[package.scala:45:27]
wire [3:0] idxHit_lo_hi_hi = {idxHit_lo_hi_hi_hi, idxHit_lo_hi_hi_lo}; // @[package.scala:45:27]
wire [6:0] idxHit_lo_hi = {idxHit_lo_hi_hi, idxHit_lo_hi_lo}; // @[package.scala:45:27]
wire [13:0] idxHit_lo = {idxHit_lo_hi, idxHit_lo_lo}; // @[package.scala:45:27]
wire [1:0] idxHit_hi_lo_lo_hi = {_idxHit_T_16, _idxHit_T_15}; // @[package.scala:45:27]
wire [2:0] idxHit_hi_lo_lo = {idxHit_hi_lo_lo_hi, _idxHit_T_14}; // @[package.scala:45:27]
wire [1:0] idxHit_hi_lo_hi_lo = {_idxHit_T_18, _idxHit_T_17}; // @[package.scala:45:27]
wire [1:0] idxHit_hi_lo_hi_hi = {_idxHit_T_20, _idxHit_T_19}; // @[package.scala:45:27]
wire [3:0] idxHit_hi_lo_hi = {idxHit_hi_lo_hi_hi, idxHit_hi_lo_hi_lo}; // @[package.scala:45:27]
wire [6:0] idxHit_hi_lo = {idxHit_hi_lo_hi, idxHit_hi_lo_lo}; // @[package.scala:45:27]
wire [1:0] idxHit_hi_hi_lo_hi = {_idxHit_T_23, _idxHit_T_22}; // @[package.scala:45:27]
wire [2:0] idxHit_hi_hi_lo = {idxHit_hi_hi_lo_hi, _idxHit_T_21}; // @[package.scala:45:27]
wire [1:0] idxHit_hi_hi_hi_lo = {_idxHit_T_25, _idxHit_T_24}; // @[package.scala:45:27]
wire [1:0] idxHit_hi_hi_hi_hi = {_idxHit_T_27, _idxHit_T_26}; // @[package.scala:45:27]
wire [3:0] idxHit_hi_hi_hi = {idxHit_hi_hi_hi_hi, idxHit_hi_hi_hi_lo}; // @[package.scala:45:27]
wire [6:0] idxHit_hi_hi = {idxHit_hi_hi_hi, idxHit_hi_hi_lo}; // @[package.scala:45:27]
wire [13:0] idxHit_hi = {idxHit_hi_hi, idxHit_hi_lo}; // @[package.scala:45:27]
wire [27:0] _idxHit_T_28 = {idxHit_hi, idxHit_lo}; // @[package.scala:45:27]
wire [27:0] idxHit = _idxHit_T_28 & isValid; // @[package.scala:45:27]
wire [24:0] updatePageHit_p = r_btb_update_bits_pc[38:14]; // @[Valid.scala:135:21]
wire [24:0] _samePage_T = r_btb_update_bits_pc[38:14]; // @[Valid.scala:135:21]
wire _updatePageHit_T = pages_0 == updatePageHit_p; // @[BTB.scala:203:18, :211:39, :214:29]
wire _updatePageHit_T_1 = pages_1 == updatePageHit_p; // @[BTB.scala:203:18, :211:39, :214:29]
wire _updatePageHit_T_2 = pages_2 == updatePageHit_p; // @[BTB.scala:203:18, :211:39, :214:29]
wire _updatePageHit_T_3 = pages_3 == updatePageHit_p; // @[BTB.scala:203:18, :211:39, :214:29]
wire _updatePageHit_T_4 = pages_4 == updatePageHit_p; // @[BTB.scala:203:18, :211:39, :214:29]
wire _updatePageHit_T_5 = pages_5 == updatePageHit_p; // @[BTB.scala:203:18, :211:39, :214:29]
wire [1:0] updatePageHit_lo_hi = {_updatePageHit_T_2, _updatePageHit_T_1}; // @[package.scala:45:27]
wire [2:0] updatePageHit_lo = {updatePageHit_lo_hi, _updatePageHit_T}; // @[package.scala:45:27]
wire [1:0] updatePageHit_hi_hi = {_updatePageHit_T_5, _updatePageHit_T_4}; // @[package.scala:45:27]
wire [2:0] updatePageHit_hi = {updatePageHit_hi_hi, _updatePageHit_T_3}; // @[package.scala:45:27]
wire [5:0] _updatePageHit_T_6 = {updatePageHit_hi, updatePageHit_lo}; // @[package.scala:45:27]
wire [5:0] updatePageHit = pageValid & _updatePageHit_T_6; // @[package.scala:45:27]
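// NOTE (annotation): updatePageHit compares the update PC's page bits
// (r_btb_update_bits_pc[38:14]) against each of the six page slots
// (pages_0..pages_5) and masks the result with pageValid, flagging any slot that
// already holds the update's index page.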
wire updateHit = r_btb_update_bits_prediction_entry[4:2] != 3'h7; // @[Valid.scala:135:21]
wire useUpdatePageHit = |updatePageHit; // @[BTB.scala:214:15, :234:40]
wire usePageHit = |pageHit; // @[BTB.scala:214:15, :235:28]
wire doIdxPageRepl = ~useUpdatePageHit; // @[BTB.scala:234:40, :236:23]
reg [2:0] nextPageRepl; // @[BTB.scala:237:29]
wire [4:0] _idxPageRepl_T = pageHit[4:0]; // @[BTB.scala:214:15, :238:32]
wire _idxPageRepl_T_1 = pageHit[5]; // @[BTB.scala:214:15, :238:53]
wire [5:0] _idxPageRepl_T_2 = {_idxPageRepl_T, _idxPageRepl_T_1}; // @[BTB.scala:238:{24,32,53}]
wire [7:0] _idxPageRepl_T_3 = 8'h1 << nextPageRepl; // @[OneHot.scala:58:35]
wire [7:0] _idxPageRepl_T_4 = usePageHit ? 8'h0 : _idxPageRepl_T_3; // @[OneHot.scala:58:35]
wire [7:0] idxPageRepl = {2'h0, _idxPageRepl_T_2} | _idxPageRepl_T_4; // @[BTB.scala:238:{24,65,70}]
wire [7:0] idxPageUpdateOH = useUpdatePageHit ? {2'h0, updatePageHit} : idxPageRepl; // @[BTB.scala:214:15, :234:40, :238:65, :239:28]
wire [3:0] idxPageUpdate_hi = idxPageUpdateOH[7:4]; // @[OneHot.scala:30:18]
wire [3:0] idxPageUpdate_lo = idxPageUpdateOH[3:0]; // @[OneHot.scala:31:18]
wire _idxPageUpdate_T = |idxPageUpdate_hi; // @[OneHot.scala:30:18, :32:14]
wire [3:0] _idxPageUpdate_T_1 = idxPageUpdate_hi | idxPageUpdate_lo; // @[OneHot.scala:30:18, :31:18, :32:28]
wire [1:0] idxPageUpdate_hi_1 = _idxPageUpdate_T_1[3:2]; // @[OneHot.scala:30:18, :32:28]
wire [1:0] idxPageUpdate_lo_1 = _idxPageUpdate_T_1[1:0]; // @[OneHot.scala:31:18, :32:28]
wire _idxPageUpdate_T_2 = |idxPageUpdate_hi_1; // @[OneHot.scala:30:18, :32:14]
wire [1:0] _idxPageUpdate_T_3 = idxPageUpdate_hi_1 | idxPageUpdate_lo_1; // @[OneHot.scala:30:18, :31:18, :32:28]
wire _idxPageUpdate_T_4 = _idxPageUpdate_T_3[1]; // @[OneHot.scala:32:28]
wire [1:0] _idxPageUpdate_T_5 = {_idxPageUpdate_T_2, _idxPageUpdate_T_4}; // @[OneHot.scala:32:{10,14}]
wire [2:0] idxPageUpdate = {_idxPageUpdate_T, _idxPageUpdate_T_5}; // @[OneHot.scala:32:{10,14}]
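// NOTE (annotation): the hi/lo halving pattern above (OneHot.scala:30-32) is the
// usual OHToUInt reduction; it converts the one-hot page select idxPageUpdateOH
// into the 3-bit binary page number idxPageUpdate.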
wire [7:0] idxPageReplEn = doIdxPageRepl ? idxPageRepl : 8'h0; // @[BTB.scala:236:23, :238:65, :241:26]
wire samePage = _samePage_T == _samePage_T_1; // @[BTB.scala:211:39, :243:45]
wire _doTgtPageRepl_T = ~samePage; // @[BTB.scala:243:45, :244:23]
wire _doTgtPageRepl_T_1 = ~usePageHit; // @[BTB.scala:235:28, :244:36]
wire doTgtPageRepl = _doTgtPageRepl_T & _doTgtPageRepl_T_1; // @[BTB.scala:244:{23,33,36}]
wire [4:0] _tgtPageRepl_T = idxPageUpdateOH[4:0]; // @[BTB.scala:239:28, :245:71]
wire _tgtPageRepl_T_1 = idxPageUpdateOH[5]; // @[BTB.scala:239:28, :245:100]
wire [5:0] _tgtPageRepl_T_2 = {_tgtPageRepl_T, _tgtPageRepl_T_1}; // @[BTB.scala:245:{55,71,100}]
wire [7:0] tgtPageRepl = samePage ? idxPageUpdateOH : {2'h0, _tgtPageRepl_T_2}; // @[BTB.scala:239:28, :243:45, :245:{24,55}]
wire [7:0] _tgtPageUpdate_T = usePageHit ? 8'h0 : tgtPageRepl; // @[BTB.scala:235:28, :245:24, :246:45]
wire [7:0] _tgtPageUpdate_T_1 = {2'h0, pageHit} | _tgtPageUpdate_T; // @[BTB.scala:214:15, :246:{40,45}]
wire [3:0] tgtPageUpdate_hi = _tgtPageUpdate_T_1[7:4]; // @[OneHot.scala:30:18]
wire [3:0] tgtPageUpdate_lo = _tgtPageUpdate_T_1[3:0]; // @[OneHot.scala:31:18]
wire _tgtPageUpdate_T_2 = |tgtPageUpdate_hi; // @[OneHot.scala:30:18, :32:14]
wire [3:0] _tgtPageUpdate_T_3 = tgtPageUpdate_hi | tgtPageUpdate_lo; // @[OneHot.scala:30:18, :31:18, :32:28]
wire [1:0] tgtPageUpdate_hi_1 = _tgtPageUpdate_T_3[3:2]; // @[OneHot.scala:30:18, :32:28]
wire [1:0] tgtPageUpdate_lo_1 = _tgtPageUpdate_T_3[1:0]; // @[OneHot.scala:31:18, :32:28]
wire _tgtPageUpdate_T_4 = |tgtPageUpdate_hi_1; // @[OneHot.scala:30:18, :32:14]
wire [1:0] _tgtPageUpdate_T_5 = tgtPageUpdate_hi_1 | tgtPageUpdate_lo_1; // @[OneHot.scala:30:18, :31:18, :32:28]
wire _tgtPageUpdate_T_6 = _tgtPageUpdate_T_5[1]; // @[OneHot.scala:32:28]
wire [1:0] _tgtPageUpdate_T_7 = {_tgtPageUpdate_T_4, _tgtPageUpdate_T_6}; // @[OneHot.scala:32:{10,14}]
wire [2:0] tgtPageUpdate = {_tgtPageUpdate_T_2, _tgtPageUpdate_T_7}; // @[OneHot.scala:32:{10,14}]
wire [7:0] tgtPageReplEn = doTgtPageRepl ? tgtPageRepl : 8'h0; // @[BTB.scala:244:33, :245:24, :247:26]
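// NOTE (annotation): idxPageReplEn / tgtPageReplEn enable a page-slot write only
// when the index page (doIdxPageRepl) or the target page (doTgtPageRepl) missed;
// tgtPageUpdate is another OHToUInt reduction, here of the selected target-page
// one-hot.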
wire both = doIdxPageRepl & doTgtPageRepl; // @[BTB.scala:236:23, :244:33, :250:30]
wire [1:0] _next_T = both ? 2'h2 : 2'h1; // @[BTB.scala:250:30, :251:40, :292:33]
wire [3:0] _next_T_1 = {1'h0, nextPageRepl} + {2'h0, _next_T}; // @[BTB.scala:237:29, :251:{29,40}]
wire [2:0] next = _next_T_1[2:0]; // @[BTB.scala:251:29]
wire _nextPageRepl_T = next > 3'h5; // @[BTB.scala:251:29, :252:30]
wire _nextPageRepl_T_1 = next[0]; // @[BTB.scala:251:29, :252:47]
wire [2:0] _nextPageRepl_T_2 = _nextPageRepl_T ? {2'h0, _nextPageRepl_T_1} : next; // @[BTB.scala:251:29, :252:{24,30,47}]
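// NOTE (annotation): nextPageRepl advances by 1, or by 2 when both an index page
// and a target page are replaced in the same cycle, and wraps once it passes the
// last of the six page slots (next > 5).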
reg [26:0] state_reg; // @[Replacement.scala:168:70]
wire waddr_left_subtree_older = state_reg[26]; // @[Replacement.scala:168:70, :243:38]
wire [10:0] waddr_left_subtree_state = state_reg[25:15]; // @[package.scala:163:13]
wire [10:0] state_reg_left_subtree_state = state_reg[25:15]; // @[package.scala:163:13]
wire [14:0] waddr_right_subtree_state = state_reg[14:0]; // @[Replacement.scala:168:70, :245:38]
wire [14:0] state_reg_right_subtree_state = state_reg[14:0]; // @[Replacement.scala:168:70, :198:38, :245:38]
wire waddr_left_subtree_older_1 = waddr_left_subtree_state[10]; // @[package.scala:163:13]
wire [2:0] waddr_left_subtree_state_1 = waddr_left_subtree_state[9:7]; // @[package.scala:163:13]
wire [6:0] waddr_right_subtree_state_1 = waddr_left_subtree_state[6:0]; // @[package.scala:163:13]
wire waddr_left_subtree_older_2 = waddr_left_subtree_state_1[2]; // @[package.scala:163:13]
wire waddr_left_subtree_state_2 = waddr_left_subtree_state_1[1]; // @[package.scala:163:13]
wire _waddr_T = waddr_left_subtree_state_2; // @[package.scala:163:13]
wire waddr_right_subtree_state_2 = waddr_left_subtree_state_1[0]; // @[package.scala:163:13]
wire _waddr_T_1 = waddr_right_subtree_state_2; // @[Replacement.scala:245:38, :262:12]
wire _waddr_T_2 = waddr_left_subtree_older_2 ? _waddr_T : _waddr_T_1; // @[Replacement.scala:243:38, :250:16, :262:12]
wire [1:0] _waddr_T_3 = {waddr_left_subtree_older_2, _waddr_T_2}; // @[Replacement.scala:243:38, :249:12, :250:16]
wire waddr_left_subtree_older_3 = waddr_right_subtree_state_1[6]; // @[Replacement.scala:243:38, :245:38]
wire [2:0] waddr_left_subtree_state_3 = waddr_right_subtree_state_1[5:3]; // @[package.scala:163:13]
wire [2:0] waddr_right_subtree_state_3 = waddr_right_subtree_state_1[2:0]; // @[Replacement.scala:245:38]
wire waddr_left_subtree_older_4 = waddr_left_subtree_state_3[2]; // @[package.scala:163:13]
wire waddr_left_subtree_state_4 = waddr_left_subtree_state_3[1]; // @[package.scala:163:13]
wire _waddr_T_4 = waddr_left_subtree_state_4; // @[package.scala:163:13]
wire waddr_right_subtree_state_4 = waddr_left_subtree_state_3[0]; // @[package.scala:163:13]
wire _waddr_T_5 = waddr_right_subtree_state_4; // @[Replacement.scala:245:38, :262:12]
wire _waddr_T_6 = waddr_left_subtree_older_4 ? _waddr_T_4 : _waddr_T_5; // @[Replacement.scala:243:38, :250:16, :262:12]
wire [1:0] _waddr_T_7 = {waddr_left_subtree_older_4, _waddr_T_6}; // @[Replacement.scala:243:38, :249:12, :250:16]
wire waddr_left_subtree_older_5 = waddr_right_subtree_state_3[2]; // @[Replacement.scala:243:38, :245:38]
wire waddr_left_subtree_state_5 = waddr_right_subtree_state_3[1]; // @[package.scala:163:13]
wire _waddr_T_8 = waddr_left_subtree_state_5; // @[package.scala:163:13]
wire waddr_right_subtree_state_5 = waddr_right_subtree_state_3[0]; // @[Replacement.scala:245:38]
wire _waddr_T_9 = waddr_right_subtree_state_5; // @[Replacement.scala:245:38, :262:12]
wire _waddr_T_10 = waddr_left_subtree_older_5 ? _waddr_T_8 : _waddr_T_9; // @[Replacement.scala:243:38, :250:16, :262:12]
wire [1:0] _waddr_T_11 = {waddr_left_subtree_older_5, _waddr_T_10}; // @[Replacement.scala:243:38, :249:12, :250:16]
wire [1:0] _waddr_T_12 = waddr_left_subtree_older_3 ? _waddr_T_7 : _waddr_T_11; // @[Replacement.scala:243:38, :249:12, :250:16]
wire [2:0] _waddr_T_13 = {waddr_left_subtree_older_3, _waddr_T_12}; // @[Replacement.scala:243:38, :249:12, :250:16]
wire [2:0] _waddr_T_14 = waddr_left_subtree_older_1 ? {1'h0, _waddr_T_3} : _waddr_T_13; // @[Replacement.scala:243:38, :249:12, :250:16]
wire [3:0] _waddr_T_15 = {waddr_left_subtree_older_1, _waddr_T_14}; // @[Replacement.scala:243:38, :249:12, :250:16]
wire waddr_left_subtree_older_6 = waddr_right_subtree_state[14]; // @[Replacement.scala:243:38, :245:38]
wire [6:0] waddr_left_subtree_state_6 = waddr_right_subtree_state[13:7]; // @[package.scala:163:13]
wire [6:0] waddr_right_subtree_state_6 = waddr_right_subtree_state[6:0]; // @[Replacement.scala:245:38]
wire waddr_left_subtree_older_7 = waddr_left_subtree_state_6[6]; // @[package.scala:163:13]
wire [2:0] waddr_left_subtree_state_7 = waddr_left_subtree_state_6[5:3]; // @[package.scala:163:13]
wire [2:0] waddr_right_subtree_state_7 = waddr_left_subtree_state_6[2:0]; // @[package.scala:163:13]
wire waddr_left_subtree_older_8 = waddr_left_subtree_state_7[2]; // @[package.scala:163:13]
wire waddr_left_subtree_state_8 = waddr_left_subtree_state_7[1]; // @[package.scala:163:13]
wire _waddr_T_16 = waddr_left_subtree_state_8; // @[package.scala:163:13]
wire waddr_right_subtree_state_8 = waddr_left_subtree_state_7[0]; // @[package.scala:163:13]
wire _waddr_T_17 = waddr_right_subtree_state_8; // @[Replacement.scala:245:38, :262:12]
wire _waddr_T_18 = waddr_left_subtree_older_8 ? _waddr_T_16 : _waddr_T_17; // @[Replacement.scala:243:38, :250:16, :262:12]
wire [1:0] _waddr_T_19 = {waddr_left_subtree_older_8, _waddr_T_18}; // @[Replacement.scala:243:38, :249:12, :250:16]
wire waddr_left_subtree_older_9 = waddr_right_subtree_state_7[2]; // @[Replacement.scala:243:38, :245:38]
wire waddr_left_subtree_state_9 = waddr_right_subtree_state_7[1]; // @[package.scala:163:13]
wire _waddr_T_20 = waddr_left_subtree_state_9; // @[package.scala:163:13]
wire waddr_right_subtree_state_9 = waddr_right_subtree_state_7[0]; // @[Replacement.scala:245:38]
wire _waddr_T_21 = waddr_right_subtree_state_9; // @[Replacement.scala:245:38, :262:12]
wire _waddr_T_22 = waddr_left_subtree_older_9 ? _waddr_T_20 : _waddr_T_21; // @[Replacement.scala:243:38, :250:16, :262:12]
wire [1:0] _waddr_T_23 = {waddr_left_subtree_older_9, _waddr_T_22}; // @[Replacement.scala:243:38, :249:12, :250:16]
wire [1:0] _waddr_T_24 = waddr_left_subtree_older_7 ? _waddr_T_19 : _waddr_T_23; // @[Replacement.scala:243:38, :249:12, :250:16]
wire [2:0] _waddr_T_25 = {waddr_left_subtree_older_7, _waddr_T_24}; // @[Replacement.scala:243:38, :249:12, :250:16]
wire waddr_left_subtree_older_10 = waddr_right_subtree_state_6[6]; // @[Replacement.scala:243:38, :245:38]
wire [2:0] waddr_left_subtree_state_10 = waddr_right_subtree_state_6[5:3]; // @[package.scala:163:13]
wire [2:0] waddr_right_subtree_state_10 = waddr_right_subtree_state_6[2:0]; // @[Replacement.scala:245:38]
wire waddr_left_subtree_older_11 = waddr_left_subtree_state_10[2]; // @[package.scala:163:13]
wire waddr_left_subtree_state_11 = waddr_left_subtree_state_10[1]; // @[package.scala:163:13]
wire _waddr_T_26 = waddr_left_subtree_state_11; // @[package.scala:163:13]
wire waddr_right_subtree_state_11 = waddr_left_subtree_state_10[0]; // @[package.scala:163:13]
wire _waddr_T_27 = waddr_right_subtree_state_11; // @[Replacement.scala:245:38, :262:12]
wire _waddr_T_28 = waddr_left_subtree_older_11 ? _waddr_T_26 : _waddr_T_27; // @[Replacement.scala:243:38, :250:16, :262:12]
wire [1:0] _waddr_T_29 = {waddr_left_subtree_older_11, _waddr_T_28}; // @[Replacement.scala:243:38, :249:12, :250:16]
wire waddr_left_subtree_older_12 = waddr_right_subtree_state_10[2]; // @[Replacement.scala:243:38, :245:38]
wire waddr_left_subtree_state_12 = waddr_right_subtree_state_10[1]; // @[package.scala:163:13]
wire _waddr_T_30 = waddr_left_subtree_state_12; // @[package.scala:163:13]
wire waddr_right_subtree_state_12 = waddr_right_subtree_state_10[0]; // @[Replacement.scala:245:38]
wire _waddr_T_31 = waddr_right_subtree_state_12; // @[Replacement.scala:245:38, :262:12]
wire _waddr_T_32 = waddr_left_subtree_older_12 ? _waddr_T_30 : _waddr_T_31; // @[Replacement.scala:243:38, :250:16, :262:12]
wire [1:0] _waddr_T_33 = {waddr_left_subtree_older_12, _waddr_T_32}; // @[Replacement.scala:243:38, :249:12, :250:16]
wire [1:0] _waddr_T_34 = waddr_left_subtree_older_10 ? _waddr_T_29 : _waddr_T_33; // @[Replacement.scala:243:38, :249:12, :250:16]
wire [2:0] _waddr_T_35 = {waddr_left_subtree_older_10, _waddr_T_34}; // @[Replacement.scala:243:38, :249:12, :250:16]
wire [2:0] _waddr_T_36 = waddr_left_subtree_older_6 ? _waddr_T_25 : _waddr_T_35; // @[Replacement.scala:243:38, :249:12, :250:16]
wire [3:0] _waddr_T_37 = {waddr_left_subtree_older_6, _waddr_T_36}; // @[Replacement.scala:243:38, :249:12, :250:16]
wire [3:0] _waddr_T_38 = waddr_left_subtree_older ? _waddr_T_15 : _waddr_T_37; // @[Replacement.scala:243:38, :249:12, :250:16]
wire [4:0] _waddr_T_39 = {waddr_left_subtree_older, _waddr_T_38}; // @[Replacement.scala:243:38, :249:12, :250:16]
wire [4:0] waddr = updateHit ? r_btb_update_bits_prediction_entry : _waddr_T_39; // @[Valid.scala:135:21]
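// NOTE (annotation): the waddr_* wires above walk the 27-bit pseudo-LRU tree
// (state_reg, per the Replacement.scala locators) over the 28 BTB entries to pick
// a victim; when the update already hit an existing entry (updateHit), that
// entry's index from the prediction is written in place instead.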
reg r_resp_pipe_v; // @[Valid.scala:141:24]
wire r_resp_valid = r_resp_pipe_v; // @[Valid.scala:135:21, :141:24]
reg [1:0] r_resp_pipe_b_cfiType; // @[Valid.scala:142:26]
wire [1:0] r_resp_bits_cfiType = r_resp_pipe_b_cfiType; // @[Valid.scala:135:21, :142:26]
reg r_resp_pipe_b_taken; // @[Valid.scala:142:26]
wire r_resp_bits_taken = r_resp_pipe_b_taken; // @[Valid.scala:135:21, :142:26]
reg [1:0] r_resp_pipe_b_mask; // @[Valid.scala:142:26]
wire [1:0] r_resp_bits_mask = r_resp_pipe_b_mask; // @[Valid.scala:135:21, :142:26]
reg r_resp_pipe_b_bridx; // @[Valid.scala:142:26]
wire r_resp_bits_bridx = r_resp_pipe_b_bridx; // @[Valid.scala:135:21, :142:26]
reg [38:0] r_resp_pipe_b_target; // @[Valid.scala:142:26]
wire [38:0] r_resp_bits_target = r_resp_pipe_b_target; // @[Valid.scala:135:21, :142:26]
reg [4:0] r_resp_pipe_b_entry; // @[Valid.scala:142:26]
wire [4:0] r_resp_bits_entry = r_resp_pipe_b_entry; // @[Valid.scala:135:21, :142:26]
reg [7:0] r_resp_pipe_b_bht_history; // @[Valid.scala:142:26]
wire [7:0] r_resp_bits_bht_history = r_resp_pipe_b_bht_history; // @[Valid.scala:135:21, :142:26]
reg r_resp_pipe_b_bht_value; // @[Valid.scala:142:26]
wire r_resp_bits_bht_value = r_resp_pipe_b_bht_value; // @[Valid.scala:135:21, :142:26]
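// NOTE (annotation): r_resp_pipe_* are the one-cycle Valid/Pipe registers that
// hold the previous prediction; r_resp_bits_entry is reused below to "touch" the
// predicted way in the replacement state when no update write is in flight.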
wire [4:0] state_reg_touch_way_sized = r_btb_update_valid ? waddr : r_resp_bits_entry; // @[Valid.scala:135:21]
wire _state_reg_set_left_older_T = state_reg_touch_way_sized[4]; // @[package.scala:163:13]
wire state_reg_set_left_older = ~_state_reg_set_left_older_T; // @[Replacement.scala:196:{33,43}]
wire [3:0] _state_reg_T = state_reg_touch_way_sized[3:0]; // @[package.scala:163:13]
wire [3:0] _state_reg_T_39 = state_reg_touch_way_sized[3:0]; // @[package.scala:163:13]
wire _state_reg_set_left_older_T_1 = _state_reg_T[3]; // @[package.scala:163:13]
wire state_reg_set_left_older_1 = ~_state_reg_set_left_older_T_1; // @[Replacement.scala:196:{33,43}]
wire [2:0] state_reg_left_subtree_state_1 = state_reg_left_subtree_state[9:7]; // @[package.scala:163:13]
wire [6:0] state_reg_right_subtree_state_1 = state_reg_left_subtree_state[6:0]; // @[package.scala:163:13]
wire [1:0] _state_reg_T_1 = _state_reg_T[1:0]; // @[package.scala:163:13]
wire _state_reg_set_left_older_T_2 = _state_reg_T_1[1]; // @[package.scala:163:13]
wire state_reg_set_left_older_2 = ~_state_reg_set_left_older_T_2; // @[Replacement.scala:196:{33,43}]
wire state_reg_left_subtree_state_2 = state_reg_left_subtree_state_1[1]; // @[package.scala:163:13]
wire state_reg_right_subtree_state_2 = state_reg_left_subtree_state_1[0]; // @[package.scala:163:13]
wire _state_reg_T_2 = _state_reg_T_1[0]; // @[package.scala:163:13]
wire _state_reg_T_6 = _state_reg_T_1[0]; // @[package.scala:163:13]
wire _state_reg_T_3 = _state_reg_T_2; // @[package.scala:163:13]
wire _state_reg_T_4 = ~_state_reg_T_3; // @[Replacement.scala:218:{7,17}]
wire _state_reg_T_5 = state_reg_set_left_older_2 ? state_reg_left_subtree_state_2 : _state_reg_T_4; // @[package.scala:163:13]
wire _state_reg_T_7 = _state_reg_T_6; // @[Replacement.scala:207:62, :218:17]
wire _state_reg_T_8 = ~_state_reg_T_7; // @[Replacement.scala:218:{7,17}]
wire _state_reg_T_9 = state_reg_set_left_older_2 ? _state_reg_T_8 : state_reg_right_subtree_state_2; // @[Replacement.scala:196:33, :198:38, :206:16, :218:7]
wire [1:0] state_reg_hi = {state_reg_set_left_older_2, _state_reg_T_5}; // @[Replacement.scala:196:33, :202:12, :203:16]
wire [2:0] _state_reg_T_10 = {state_reg_hi, _state_reg_T_9}; // @[Replacement.scala:202:12, :206:16]
wire [2:0] _state_reg_T_11 = state_reg_set_left_older_1 ? state_reg_left_subtree_state_1 : _state_reg_T_10; // @[package.scala:163:13]
wire [2:0] _state_reg_T_12 = _state_reg_T[2:0]; // @[package.scala:163:13]
wire _state_reg_set_left_older_T_3 = _state_reg_T_12[2]; // @[Replacement.scala:196:43, :207:62]
wire state_reg_set_left_older_3 = ~_state_reg_set_left_older_T_3; // @[Replacement.scala:196:{33,43}]
wire [2:0] state_reg_left_subtree_state_3 = state_reg_right_subtree_state_1[5:3]; // @[package.scala:163:13]
wire [2:0] state_reg_right_subtree_state_3 = state_reg_right_subtree_state_1[2:0]; // @[Replacement.scala:198:38]
wire [1:0] _state_reg_T_13 = _state_reg_T_12[1:0]; // @[package.scala:163:13]
wire [1:0] _state_reg_T_24 = _state_reg_T_12[1:0]; // @[package.scala:163:13]
wire _state_reg_set_left_older_T_4 = _state_reg_T_13[1]; // @[package.scala:163:13]
wire state_reg_set_left_older_4 = ~_state_reg_set_left_older_T_4; // @[Replacement.scala:196:{33,43}]
wire state_reg_left_subtree_state_4 = state_reg_left_subtree_state_3[1]; // @[package.scala:163:13]
wire state_reg_right_subtree_state_4 = state_reg_left_subtree_state_3[0]; // @[package.scala:163:13]
wire _state_reg_T_14 = _state_reg_T_13[0]; // @[package.scala:163:13]
wire _state_reg_T_18 = _state_reg_T_13[0]; // @[package.scala:163:13]
wire _state_reg_T_15 = _state_reg_T_14; // @[package.scala:163:13]
wire _state_reg_T_16 = ~_state_reg_T_15; // @[Replacement.scala:218:{7,17}]
wire _state_reg_T_17 = state_reg_set_left_older_4 ? state_reg_left_subtree_state_4 : _state_reg_T_16; // @[package.scala:163:13]
wire _state_reg_T_19 = _state_reg_T_18; // @[Replacement.scala:207:62, :218:17]
wire _state_reg_T_20 = ~_state_reg_T_19; // @[Replacement.scala:218:{7,17}]
wire _state_reg_T_21 = state_reg_set_left_older_4 ? _state_reg_T_20 : state_reg_right_subtree_state_4; // @[Replacement.scala:196:33, :198:38, :206:16, :218:7]
wire [1:0] state_reg_hi_1 = {state_reg_set_left_older_4, _state_reg_T_17}; // @[Replacement.scala:196:33, :202:12, :203:16]
wire [2:0] _state_reg_T_22 = {state_reg_hi_1, _state_reg_T_21}; // @[Replacement.scala:202:12, :206:16]
wire [2:0] _state_reg_T_23 = state_reg_set_left_older_3 ? state_reg_left_subtree_state_3 : _state_reg_T_22; // @[package.scala:163:13]
wire _state_reg_set_left_older_T_5 = _state_reg_T_24[1]; // @[Replacement.scala:196:43, :207:62]
wire state_reg_set_left_older_5 = ~_state_reg_set_left_older_T_5; // @[Replacement.scala:196:{33,43}]
wire state_reg_left_subtree_state_5 = state_reg_right_subtree_state_3[1]; // @[package.scala:163:13]
wire state_reg_right_subtree_state_5 = state_reg_right_subtree_state_3[0]; // @[Replacement.scala:198:38]
wire _state_reg_T_25 = _state_reg_T_24[0]; // @[package.scala:163:13]
wire _state_reg_T_29 = _state_reg_T_24[0]; // @[package.scala:163:13]
wire _state_reg_T_26 = _state_reg_T_25; // @[package.scala:163:13]
wire _state_reg_T_27 = ~_state_reg_T_26; // @[Replacement.scala:218:{7,17}]
wire _state_reg_T_28 = state_reg_set_left_older_5 ? state_reg_left_subtree_state_5 : _state_reg_T_27; // @[package.scala:163:13]
wire _state_reg_T_30 = _state_reg_T_29; // @[Replacement.scala:207:62, :218:17]
wire _state_reg_T_31 = ~_state_reg_T_30; // @[Replacement.scala:218:{7,17}]
wire _state_reg_T_32 = state_reg_set_left_older_5 ? _state_reg_T_31 : state_reg_right_subtree_state_5; // @[Replacement.scala:196:33, :198:38, :206:16, :218:7]
wire [1:0] state_reg_hi_2 = {state_reg_set_left_older_5, _state_reg_T_28}; // @[Replacement.scala:196:33, :202:12, :203:16]
wire [2:0] _state_reg_T_33 = {state_reg_hi_2, _state_reg_T_32}; // @[Replacement.scala:202:12, :206:16]
wire [2:0] _state_reg_T_34 = state_reg_set_left_older_3 ? _state_reg_T_33 : state_reg_right_subtree_state_3; // @[Replacement.scala:196:33, :198:38, :202:12, :206:16]
wire [3:0] state_reg_hi_3 = {state_reg_set_left_older_3, _state_reg_T_23}; // @[Replacement.scala:196:33, :202:12, :203:16]
wire [6:0] _state_reg_T_35 = {state_reg_hi_3, _state_reg_T_34}; // @[Replacement.scala:202:12, :206:16]
wire [6:0] _state_reg_T_36 = state_reg_set_left_older_1 ? _state_reg_T_35 : state_reg_right_subtree_state_1; // @[Replacement.scala:196:33, :198:38, :202:12, :206:16]
wire [3:0] state_reg_hi_4 = {state_reg_set_left_older_1, _state_reg_T_11}; // @[Replacement.scala:196:33, :202:12, :203:16]
wire [10:0] _state_reg_T_37 = {state_reg_hi_4, _state_reg_T_36}; // @[Replacement.scala:202:12, :206:16]
wire [10:0] _state_reg_T_38 = state_reg_set_left_older ? state_reg_left_subtree_state : _state_reg_T_37; // @[package.scala:163:13]
wire _state_reg_set_left_older_T_6 = _state_reg_T_39[3]; // @[Replacement.scala:196:43, :207:62]
wire state_reg_set_left_older_6 = ~_state_reg_set_left_older_T_6; // @[Replacement.scala:196:{33,43}]
wire [6:0] state_reg_left_subtree_state_6 = state_reg_right_subtree_state[13:7]; // @[package.scala:163:13]
wire [6:0] state_reg_right_subtree_state_6 = state_reg_right_subtree_state[6:0]; // @[Replacement.scala:198:38]
wire [2:0] _state_reg_T_40 = _state_reg_T_39[2:0]; // @[package.scala:163:13]
wire [2:0] _state_reg_T_65 = _state_reg_T_39[2:0]; // @[package.scala:163:13]
wire _state_reg_set_left_older_T_7 = _state_reg_T_40[2]; // @[package.scala:163:13]
wire state_reg_set_left_older_7 = ~_state_reg_set_left_older_T_7; // @[Replacement.scala:196:{33,43}]
wire [2:0] state_reg_left_subtree_state_7 = state_reg_left_subtree_state_6[5:3]; // @[package.scala:163:13]
wire [2:0] state_reg_right_subtree_state_7 = state_reg_left_subtree_state_6[2:0]; // @[package.scala:163:13]
wire [1:0] _state_reg_T_41 = _state_reg_T_40[1:0]; // @[package.scala:163:13]
wire [1:0] _state_reg_T_52 = _state_reg_T_40[1:0]; // @[package.scala:163:13]
wire _state_reg_set_left_older_T_8 = _state_reg_T_41[1]; // @[package.scala:163:13]
wire state_reg_set_left_older_8 = ~_state_reg_set_left_older_T_8; // @[Replacement.scala:196:{33,43}]
wire state_reg_left_subtree_state_8 = state_reg_left_subtree_state_7[1]; // @[package.scala:163:13]
wire state_reg_right_subtree_state_8 = state_reg_left_subtree_state_7[0]; // @[package.scala:163:13]
wire _state_reg_T_42 = _state_reg_T_41[0]; // @[package.scala:163:13]
wire _state_reg_T_46 = _state_reg_T_41[0]; // @[package.scala:163:13]
wire _state_reg_T_43 = _state_reg_T_42; // @[package.scala:163:13]
wire _state_reg_T_44 = ~_state_reg_T_43; // @[Replacement.scala:218:{7,17}]
wire _state_reg_T_45 = state_reg_set_left_older_8 ? state_reg_left_subtree_state_8 : _state_reg_T_44; // @[package.scala:163:13]
wire _state_reg_T_47 = _state_reg_T_46; // @[Replacement.scala:207:62, :218:17]
wire _state_reg_T_48 = ~_state_reg_T_47; // @[Replacement.scala:218:{7,17}]
wire _state_reg_T_49 = state_reg_set_left_older_8 ? _state_reg_T_48 : state_reg_right_subtree_state_8; // @[Replacement.scala:196:33, :198:38, :206:16, :218:7]
wire [1:0] state_reg_hi_5 = {state_reg_set_left_older_8, _state_reg_T_45}; // @[Replacement.scala:196:33, :202:12, :203:16]
wire [2:0] _state_reg_T_50 = {state_reg_hi_5, _state_reg_T_49}; // @[Replacement.scala:202:12, :206:16]
wire [2:0] _state_reg_T_51 = state_reg_set_left_older_7 ? state_reg_left_subtree_state_7 : _state_reg_T_50; // @[package.scala:163:13]
wire _state_reg_set_left_older_T_9 = _state_reg_T_52[1]; // @[Replacement.scala:196:43, :207:62]
wire state_reg_set_left_older_9 = ~_state_reg_set_left_older_T_9; // @[Replacement.scala:196:{33,43}]
wire state_reg_left_subtree_state_9 = state_reg_right_subtree_state_7[1]; // @[package.scala:163:13]
wire state_reg_right_subtree_state_9 = state_reg_right_subtree_state_7[0]; // @[Replacement.scala:198:38]
wire _state_reg_T_53 = _state_reg_T_52[0]; // @[package.scala:163:13]
wire _state_reg_T_57 = _state_reg_T_52[0]; // @[package.scala:163:13]
wire _state_reg_T_54 = _state_reg_T_53; // @[package.scala:163:13]
wire _state_reg_T_55 = ~_state_reg_T_54; // @[Replacement.scala:218:{7,17}]
wire _state_reg_T_56 = state_reg_set_left_older_9 ? state_reg_left_subtree_state_9 : _state_reg_T_55; // @[package.scala:163:13]
wire _state_reg_T_58 = _state_reg_T_57; // @[Replacement.scala:207:62, :218:17]
wire _state_reg_T_59 = ~_state_reg_T_58; // @[Replacement.scala:218:{7,17}]
wire _state_reg_T_60 = state_reg_set_left_older_9 ? _state_reg_T_59 : state_reg_right_subtree_state_9; // @[Replacement.scala:196:33, :198:38, :206:16, :218:7]
wire [1:0] state_reg_hi_6 = {state_reg_set_left_older_9, _state_reg_T_56}; // @[Replacement.scala:196:33, :202:12, :203:16]
wire [2:0] _state_reg_T_61 = {state_reg_hi_6, _state_reg_T_60}; // @[Replacement.scala:202:12, :206:16]
wire [2:0] _state_reg_T_62 = state_reg_set_left_older_7 ? _state_reg_T_61 : state_reg_right_subtree_state_7; // @[Replacement.scala:196:33, :198:38, :202:12, :206:16]
wire [3:0] state_reg_hi_7 = {state_reg_set_left_older_7, _state_reg_T_51}; // @[Replacement.scala:196:33, :202:12, :203:16]
wire [6:0] _state_reg_T_63 = {state_reg_hi_7, _state_reg_T_62}; // @[Replacement.scala:202:12, :206:16]
wire [6:0] _state_reg_T_64 = state_reg_set_left_older_6 ? state_reg_left_subtree_state_6 : _state_reg_T_63; // @[package.scala:163:13]
wire _state_reg_set_left_older_T_10 = _state_reg_T_65[2]; // @[Replacement.scala:196:43, :207:62]
wire state_reg_set_left_older_10 = ~_state_reg_set_left_older_T_10; // @[Replacement.scala:196:{33,43}]
wire [2:0] state_reg_left_subtree_state_10 = state_reg_right_subtree_state_6[5:3]; // @[package.scala:163:13]
wire [2:0] state_reg_right_subtree_state_10 = state_reg_right_subtree_state_6[2:0]; // @[Replacement.scala:198:38]
wire [1:0] _state_reg_T_66 = _state_reg_T_65[1:0]; // @[package.scala:163:13]
wire [1:0] _state_reg_T_77 = _state_reg_T_65[1:0]; // @[package.scala:163:13]
wire _state_reg_set_left_older_T_11 = _state_reg_T_66[1]; // @[package.scala:163:13]
wire state_reg_set_left_older_11 = ~_state_reg_set_left_older_T_11; // @[Replacement.scala:196:{33,43}]
wire state_reg_left_subtree_state_11 = state_reg_left_subtree_state_10[1]; // @[package.scala:163:13]
wire state_reg_right_subtree_state_11 = state_reg_left_subtree_state_10[0]; // @[package.scala:163:13]
wire _state_reg_T_67 = _state_reg_T_66[0]; // @[package.scala:163:13]
wire _state_reg_T_71 = _state_reg_T_66[0]; // @[package.scala:163:13]
wire _state_reg_T_68 = _state_reg_T_67; // @[package.scala:163:13]
wire _state_reg_T_69 = ~_state_reg_T_68; // @[Replacement.scala:218:{7,17}]
wire _state_reg_T_70 = state_reg_set_left_older_11 ? state_reg_left_subtree_state_11 : _state_reg_T_69; // @[package.scala:163:13]
wire _state_reg_T_72 = _state_reg_T_71; // @[Replacement.scala:207:62, :218:17]
wire _state_reg_T_73 = ~_state_reg_T_72; // @[Replacement.scala:218:{7,17}]
wire _state_reg_T_74 = state_reg_set_left_older_11 ? _state_reg_T_73 : state_reg_right_subtree_state_11; // @[Replacement.scala:196:33, :198:38, :206:16, :218:7]
wire [1:0] state_reg_hi_8 = {state_reg_set_left_older_11, _state_reg_T_70}; // @[Replacement.scala:196:33, :202:12, :203:16]
wire [2:0] _state_reg_T_75 = {state_reg_hi_8, _state_reg_T_74}; // @[Replacement.scala:202:12, :206:16]
wire [2:0] _state_reg_T_76 = state_reg_set_left_older_10 ? state_reg_left_subtree_state_10 : _state_reg_T_75; // @[package.scala:163:13]
wire _state_reg_set_left_older_T_12 = _state_reg_T_77[1]; // @[Replacement.scala:196:43, :207:62]
wire state_reg_set_left_older_12 = ~_state_reg_set_left_older_T_12; // @[Replacement.scala:196:{33,43}]
wire state_reg_left_subtree_state_12 = state_reg_right_subtree_state_10[1]; // @[package.scala:163:13]
wire state_reg_right_subtree_state_12 = state_reg_right_subtree_state_10[0]; // @[Replacement.scala:198:38]
wire _state_reg_T_78 = _state_reg_T_77[0]; // @[package.scala:163:13]
wire _state_reg_T_82 = _state_reg_T_77[0]; // @[package.scala:163:13]
wire _state_reg_T_79 = _state_reg_T_78; // @[package.scala:163:13]
wire _state_reg_T_80 = ~_state_reg_T_79; // @[Replacement.scala:218:{7,17}]
wire _state_reg_T_81 = state_reg_set_left_older_12 ? state_reg_left_subtree_state_12 : _state_reg_T_80; // @[package.scala:163:13]
wire _state_reg_T_83 = _state_reg_T_82; // @[Replacement.scala:207:62, :218:17]
wire _state_reg_T_84 = ~_state_reg_T_83; // @[Replacement.scala:218:{7,17}]
wire _state_reg_T_85 = state_reg_set_left_older_12 ? _state_reg_T_84 : state_reg_right_subtree_state_12; // @[Replacement.scala:196:33, :198:38, :206:16, :218:7]
wire [1:0] state_reg_hi_9 = {state_reg_set_left_older_12, _state_reg_T_81}; // @[Replacement.scala:196:33, :202:12, :203:16]
wire [2:0] _state_reg_T_86 = {state_reg_hi_9, _state_reg_T_85}; // @[Replacement.scala:202:12, :206:16]
wire [2:0] _state_reg_T_87 = state_reg_set_left_older_10 ? _state_reg_T_86 : state_reg_right_subtree_state_10; // @[Replacement.scala:196:33, :198:38, :202:12, :206:16]
wire [3:0] state_reg_hi_10 = {state_reg_set_left_older_10, _state_reg_T_76}; // @[Replacement.scala:196:33, :202:12, :203:16]
wire [6:0] _state_reg_T_88 = {state_reg_hi_10, _state_reg_T_87}; // @[Replacement.scala:202:12, :206:16]
wire [6:0] _state_reg_T_89 = state_reg_set_left_older_6 ? _state_reg_T_88 : state_reg_right_subtree_state_6; // @[Replacement.scala:196:33, :198:38, :202:12, :206:16]
wire [7:0] state_reg_hi_11 = {state_reg_set_left_older_6, _state_reg_T_64}; // @[Replacement.scala:196:33, :202:12, :203:16]
wire [14:0] _state_reg_T_90 = {state_reg_hi_11, _state_reg_T_89}; // @[Replacement.scala:202:12, :206:16]
wire [14:0] _state_reg_T_91 = state_reg_set_left_older ? _state_reg_T_90 : state_reg_right_subtree_state; // @[Replacement.scala:196:33, :198:38, :202:12, :206:16]
wire [11:0] state_reg_hi_12 = {state_reg_set_left_older, _state_reg_T_38}; // @[Replacement.scala:196:33, :202:12, :203:16]
wire [26:0] _state_reg_T_92 = {state_reg_hi_12, _state_reg_T_91}; // @[Replacement.scala:202:12, :206:16]
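// NOTE (annotation): _state_reg_T_92 appears to be the next pseudo-LRU state
// after touching state_reg_touch_way_sized (the written entry on an update,
// otherwise the last predicted entry); at each tree level the bit is steered
// toward the subtree that was not just accessed.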
wire [31:0] mask = 32'h1 << waddr; // @[OneHot.scala:58:35]
wire [12:0] _idxs_T = r_btb_update_bits_pc[13:1]; // @[Valid.scala:135:21]
wire [3:0] _idxPages_T = {1'h0, idxPageUpdate} + 4'h1; // @[OneHot.scala:32:10]
wire [31:0] _isValid_T = {4'h0, isValid} | mask; // @[OneHot.scala:32:14, :58:35]
wire [31:0] _isValid_T_1 = ~mask; // @[OneHot.scala:58:35]
wire [31:0] _isValid_T_2 = {4'h0, _isValid_T_1[27:0] & isValid}; // @[OneHot.scala:32:14]
wire [31:0] _isValid_T_3 = r_btb_update_bits_isValid ? _isValid_T : _isValid_T_2; // @[Valid.scala:135:21]
wire [37:0] _brIdx_T = r_btb_update_bits_br_pc[38:1]; // @[Valid.scala:135:21]
wire _idxWritesEven_T = idxPageUpdate[0]; // @[OneHot.scala:32:10]
wire idxWritesEven = ~_idxWritesEven_T; // @[BTB.scala:274:{25,39}]
wire [7:0] _pageValid_T = {2'h0, pageValid} | tgtPageReplEn; // @[BTB.scala:204:26, :247:26, :284:28]
wire [7:0] _pageValid_T_1 = _pageValid_T | idxPageReplEn; // @[BTB.scala:241:26, :284:{28,44}]
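// NOTE (annotation): update-side bookkeeping. `mask` one-hot-selects the entry
// being written, _isValid_T_3 sets or clears its valid bit depending on
// r_btb_update_bits_isValid, and _pageValid_T_1 marks any newly allocated
// index/target page slots as valid.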
wire [6:0] _io_resp_valid_T = {pageHit, 1'h0}; // @[BTB.scala:214:15, :287:29]
wire _io_resp_valid_T_1 = idxHit[0]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T = idxHit[0]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_97 = idxHit[0]; // @[Mux.scala:32:36]
wire _io_resp_bits_bridx_T = idxHit[0]; // @[Mux.scala:32:36]
wire _io_resp_bits_cfiType_T = idxHit[0]; // @[Mux.scala:32:36]
wire _io_resp_valid_T_2 = idxHit[1]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_1 = idxHit[1]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_98 = idxHit[1]; // @[Mux.scala:32:36]
wire _io_resp_bits_bridx_T_1 = idxHit[1]; // @[Mux.scala:32:36]
wire _io_resp_bits_cfiType_T_1 = idxHit[1]; // @[Mux.scala:32:36]
wire _io_resp_valid_T_3 = idxHit[2]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_2 = idxHit[2]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_99 = idxHit[2]; // @[Mux.scala:32:36]
wire _io_resp_bits_bridx_T_2 = idxHit[2]; // @[Mux.scala:32:36]
wire _io_resp_bits_cfiType_T_2 = idxHit[2]; // @[Mux.scala:32:36]
wire _io_resp_valid_T_4 = idxHit[3]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_3 = idxHit[3]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_100 = idxHit[3]; // @[Mux.scala:32:36]
wire _io_resp_bits_bridx_T_3 = idxHit[3]; // @[Mux.scala:32:36]
wire _io_resp_bits_cfiType_T_3 = idxHit[3]; // @[Mux.scala:32:36]
wire _io_resp_valid_T_5 = idxHit[4]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_4 = idxHit[4]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_101 = idxHit[4]; // @[Mux.scala:32:36]
wire _io_resp_bits_bridx_T_4 = idxHit[4]; // @[Mux.scala:32:36]
wire _io_resp_bits_cfiType_T_4 = idxHit[4]; // @[Mux.scala:32:36]
wire _io_resp_valid_T_6 = idxHit[5]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_5 = idxHit[5]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_102 = idxHit[5]; // @[Mux.scala:32:36]
wire _io_resp_bits_bridx_T_5 = idxHit[5]; // @[Mux.scala:32:36]
wire _io_resp_bits_cfiType_T_5 = idxHit[5]; // @[Mux.scala:32:36]
wire _io_resp_valid_T_7 = idxHit[6]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_6 = idxHit[6]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_103 = idxHit[6]; // @[Mux.scala:32:36]
wire _io_resp_bits_bridx_T_6 = idxHit[6]; // @[Mux.scala:32:36]
wire _io_resp_bits_cfiType_T_6 = idxHit[6]; // @[Mux.scala:32:36]
wire _io_resp_valid_T_8 = idxHit[7]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_7 = idxHit[7]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_104 = idxHit[7]; // @[Mux.scala:32:36]
wire _io_resp_bits_bridx_T_7 = idxHit[7]; // @[Mux.scala:32:36]
wire _io_resp_bits_cfiType_T_7 = idxHit[7]; // @[Mux.scala:32:36]
wire _io_resp_valid_T_9 = idxHit[8]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_8 = idxHit[8]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_105 = idxHit[8]; // @[Mux.scala:32:36]
wire _io_resp_bits_bridx_T_8 = idxHit[8]; // @[Mux.scala:32:36]
wire _io_resp_bits_cfiType_T_8 = idxHit[8]; // @[Mux.scala:32:36]
wire _io_resp_valid_T_10 = idxHit[9]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_9 = idxHit[9]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_106 = idxHit[9]; // @[Mux.scala:32:36]
wire _io_resp_bits_bridx_T_9 = idxHit[9]; // @[Mux.scala:32:36]
wire _io_resp_bits_cfiType_T_9 = idxHit[9]; // @[Mux.scala:32:36]
wire _io_resp_valid_T_11 = idxHit[10]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_10 = idxHit[10]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_107 = idxHit[10]; // @[Mux.scala:32:36]
wire _io_resp_bits_bridx_T_10 = idxHit[10]; // @[Mux.scala:32:36]
wire _io_resp_bits_cfiType_T_10 = idxHit[10]; // @[Mux.scala:32:36]
wire _io_resp_valid_T_12 = idxHit[11]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_11 = idxHit[11]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_108 = idxHit[11]; // @[Mux.scala:32:36]
wire _io_resp_bits_bridx_T_11 = idxHit[11]; // @[Mux.scala:32:36]
wire _io_resp_bits_cfiType_T_11 = idxHit[11]; // @[Mux.scala:32:36]
wire _io_resp_valid_T_13 = idxHit[12]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_12 = idxHit[12]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_109 = idxHit[12]; // @[Mux.scala:32:36]
wire _io_resp_bits_bridx_T_12 = idxHit[12]; // @[Mux.scala:32:36]
wire _io_resp_bits_cfiType_T_12 = idxHit[12]; // @[Mux.scala:32:36]
wire _io_resp_valid_T_14 = idxHit[13]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_13 = idxHit[13]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_110 = idxHit[13]; // @[Mux.scala:32:36]
wire _io_resp_bits_bridx_T_13 = idxHit[13]; // @[Mux.scala:32:36]
wire _io_resp_bits_cfiType_T_13 = idxHit[13]; // @[Mux.scala:32:36]
wire _io_resp_valid_T_15 = idxHit[14]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_14 = idxHit[14]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_111 = idxHit[14]; // @[Mux.scala:32:36]
wire _io_resp_bits_bridx_T_14 = idxHit[14]; // @[Mux.scala:32:36]
wire _io_resp_bits_cfiType_T_14 = idxHit[14]; // @[Mux.scala:32:36]
wire _io_resp_valid_T_16 = idxHit[15]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_15 = idxHit[15]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_112 = idxHit[15]; // @[Mux.scala:32:36]
wire _io_resp_bits_bridx_T_15 = idxHit[15]; // @[Mux.scala:32:36]
wire _io_resp_bits_cfiType_T_15 = idxHit[15]; // @[Mux.scala:32:36]
wire _io_resp_valid_T_17 = idxHit[16]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_16 = idxHit[16]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_113 = idxHit[16]; // @[Mux.scala:32:36]
wire _io_resp_bits_bridx_T_16 = idxHit[16]; // @[Mux.scala:32:36]
wire _io_resp_bits_cfiType_T_16 = idxHit[16]; // @[Mux.scala:32:36]
wire _io_resp_valid_T_18 = idxHit[17]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_17 = idxHit[17]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_114 = idxHit[17]; // @[Mux.scala:32:36]
wire _io_resp_bits_bridx_T_17 = idxHit[17]; // @[Mux.scala:32:36]
wire _io_resp_bits_cfiType_T_17 = idxHit[17]; // @[Mux.scala:32:36]
wire _io_resp_valid_T_19 = idxHit[18]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_18 = idxHit[18]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_115 = idxHit[18]; // @[Mux.scala:32:36]
wire _io_resp_bits_bridx_T_18 = idxHit[18]; // @[Mux.scala:32:36]
wire _io_resp_bits_cfiType_T_18 = idxHit[18]; // @[Mux.scala:32:36]
wire _io_resp_valid_T_20 = idxHit[19]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_19 = idxHit[19]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_116 = idxHit[19]; // @[Mux.scala:32:36]
wire _io_resp_bits_bridx_T_19 = idxHit[19]; // @[Mux.scala:32:36]
wire _io_resp_bits_cfiType_T_19 = idxHit[19]; // @[Mux.scala:32:36]
wire _io_resp_valid_T_21 = idxHit[20]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_20 = idxHit[20]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_117 = idxHit[20]; // @[Mux.scala:32:36]
wire _io_resp_bits_bridx_T_20 = idxHit[20]; // @[Mux.scala:32:36]
wire _io_resp_bits_cfiType_T_20 = idxHit[20]; // @[Mux.scala:32:36]
wire _io_resp_valid_T_22 = idxHit[21]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_21 = idxHit[21]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_118 = idxHit[21]; // @[Mux.scala:32:36]
wire _io_resp_bits_bridx_T_21 = idxHit[21]; // @[Mux.scala:32:36]
wire _io_resp_bits_cfiType_T_21 = idxHit[21]; // @[Mux.scala:32:36]
wire _io_resp_valid_T_23 = idxHit[22]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_22 = idxHit[22]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_119 = idxHit[22]; // @[Mux.scala:32:36]
wire _io_resp_bits_bridx_T_22 = idxHit[22]; // @[Mux.scala:32:36]
wire _io_resp_bits_cfiType_T_22 = idxHit[22]; // @[Mux.scala:32:36]
wire _io_resp_valid_T_24 = idxHit[23]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_23 = idxHit[23]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_120 = idxHit[23]; // @[Mux.scala:32:36]
wire _io_resp_bits_bridx_T_23 = idxHit[23]; // @[Mux.scala:32:36]
wire _io_resp_bits_cfiType_T_23 = idxHit[23]; // @[Mux.scala:32:36]
wire _io_resp_valid_T_25 = idxHit[24]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_24 = idxHit[24]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_121 = idxHit[24]; // @[Mux.scala:32:36]
wire _io_resp_bits_bridx_T_24 = idxHit[24]; // @[Mux.scala:32:36]
wire _io_resp_bits_cfiType_T_24 = idxHit[24]; // @[Mux.scala:32:36]
wire _io_resp_valid_T_26 = idxHit[25]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_25 = idxHit[25]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_122 = idxHit[25]; // @[Mux.scala:32:36]
wire _io_resp_bits_bridx_T_25 = idxHit[25]; // @[Mux.scala:32:36]
wire _io_resp_bits_cfiType_T_25 = idxHit[25]; // @[Mux.scala:32:36]
wire _io_resp_valid_T_27 = idxHit[26]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_26 = idxHit[26]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_123 = idxHit[26]; // @[Mux.scala:32:36]
wire _io_resp_bits_bridx_T_26 = idxHit[26]; // @[Mux.scala:32:36]
wire _io_resp_bits_cfiType_T_26 = idxHit[26]; // @[Mux.scala:32:36]
wire _io_resp_valid_T_28 = idxHit[27]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_27 = idxHit[27]; // @[Mux.scala:32:36]
wire _io_resp_bits_target_T_124 = idxHit[27]; // @[Mux.scala:32:36]
wire _io_resp_bits_bridx_T_27 = idxHit[27]; // @[Mux.scala:32:36]
wire _io_resp_bits_cfiType_T_27 = idxHit[27]; // @[Mux.scala:32:36]
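// NOTE (annotation): the wires above simply fan each idxHit bit out to the five
// one-hot mux (Mux1H) select networks that follow: resp.valid, two uses in
// resp.target (page select and offset select), resp.bridx, and resp.cfiType.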
wire [2:0] _io_resp_valid_T_29 = _io_resp_valid_T_1 ? idxPages_0 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_valid_T_30 = _io_resp_valid_T_2 ? idxPages_1 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_valid_T_31 = _io_resp_valid_T_3 ? idxPages_2 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_valid_T_32 = _io_resp_valid_T_4 ? idxPages_3 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_valid_T_33 = _io_resp_valid_T_5 ? idxPages_4 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_valid_T_34 = _io_resp_valid_T_6 ? idxPages_5 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_valid_T_35 = _io_resp_valid_T_7 ? idxPages_6 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_valid_T_36 = _io_resp_valid_T_8 ? idxPages_7 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_valid_T_37 = _io_resp_valid_T_9 ? idxPages_8 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_valid_T_38 = _io_resp_valid_T_10 ? idxPages_9 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_valid_T_39 = _io_resp_valid_T_11 ? idxPages_10 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_valid_T_40 = _io_resp_valid_T_12 ? idxPages_11 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_valid_T_41 = _io_resp_valid_T_13 ? idxPages_12 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_valid_T_42 = _io_resp_valid_T_14 ? idxPages_13 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_valid_T_43 = _io_resp_valid_T_15 ? idxPages_14 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_valid_T_44 = _io_resp_valid_T_16 ? idxPages_15 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_valid_T_45 = _io_resp_valid_T_17 ? idxPages_16 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_valid_T_46 = _io_resp_valid_T_18 ? idxPages_17 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_valid_T_47 = _io_resp_valid_T_19 ? idxPages_18 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_valid_T_48 = _io_resp_valid_T_20 ? idxPages_19 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_valid_T_49 = _io_resp_valid_T_21 ? idxPages_20 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_valid_T_50 = _io_resp_valid_T_22 ? idxPages_21 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_valid_T_51 = _io_resp_valid_T_23 ? idxPages_22 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_valid_T_52 = _io_resp_valid_T_24 ? idxPages_23 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_valid_T_53 = _io_resp_valid_T_25 ? idxPages_24 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_valid_T_54 = _io_resp_valid_T_26 ? idxPages_25 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_valid_T_55 = _io_resp_valid_T_27 ? idxPages_26 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_valid_T_56 = _io_resp_valid_T_28 ? idxPages_27 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_valid_T_57 = _io_resp_valid_T_29 | _io_resp_valid_T_30; // @[Mux.scala:30:73]
wire [2:0] _io_resp_valid_T_58 = _io_resp_valid_T_57 | _io_resp_valid_T_31; // @[Mux.scala:30:73]
wire [2:0] _io_resp_valid_T_59 = _io_resp_valid_T_58 | _io_resp_valid_T_32; // @[Mux.scala:30:73]
wire [2:0] _io_resp_valid_T_60 = _io_resp_valid_T_59 | _io_resp_valid_T_33; // @[Mux.scala:30:73]
wire [2:0] _io_resp_valid_T_61 = _io_resp_valid_T_60 | _io_resp_valid_T_34; // @[Mux.scala:30:73]
wire [2:0] _io_resp_valid_T_62 = _io_resp_valid_T_61 | _io_resp_valid_T_35; // @[Mux.scala:30:73]
wire [2:0] _io_resp_valid_T_63 = _io_resp_valid_T_62 | _io_resp_valid_T_36; // @[Mux.scala:30:73]
wire [2:0] _io_resp_valid_T_64 = _io_resp_valid_T_63 | _io_resp_valid_T_37; // @[Mux.scala:30:73]
wire [2:0] _io_resp_valid_T_65 = _io_resp_valid_T_64 | _io_resp_valid_T_38; // @[Mux.scala:30:73]
wire [2:0] _io_resp_valid_T_66 = _io_resp_valid_T_65 | _io_resp_valid_T_39; // @[Mux.scala:30:73]
wire [2:0] _io_resp_valid_T_67 = _io_resp_valid_T_66 | _io_resp_valid_T_40; // @[Mux.scala:30:73]
wire [2:0] _io_resp_valid_T_68 = _io_resp_valid_T_67 | _io_resp_valid_T_41; // @[Mux.scala:30:73]
wire [2:0] _io_resp_valid_T_69 = _io_resp_valid_T_68 | _io_resp_valid_T_42; // @[Mux.scala:30:73]
wire [2:0] _io_resp_valid_T_70 = _io_resp_valid_T_69 | _io_resp_valid_T_43; // @[Mux.scala:30:73]
wire [2:0] _io_resp_valid_T_71 = _io_resp_valid_T_70 | _io_resp_valid_T_44; // @[Mux.scala:30:73]
wire [2:0] _io_resp_valid_T_72 = _io_resp_valid_T_71 | _io_resp_valid_T_45; // @[Mux.scala:30:73]
wire [2:0] _io_resp_valid_T_73 = _io_resp_valid_T_72 | _io_resp_valid_T_46; // @[Mux.scala:30:73]
wire [2:0] _io_resp_valid_T_74 = _io_resp_valid_T_73 | _io_resp_valid_T_47; // @[Mux.scala:30:73]
wire [2:0] _io_resp_valid_T_75 = _io_resp_valid_T_74 | _io_resp_valid_T_48; // @[Mux.scala:30:73]
wire [2:0] _io_resp_valid_T_76 = _io_resp_valid_T_75 | _io_resp_valid_T_49; // @[Mux.scala:30:73]
wire [2:0] _io_resp_valid_T_77 = _io_resp_valid_T_76 | _io_resp_valid_T_50; // @[Mux.scala:30:73]
wire [2:0] _io_resp_valid_T_78 = _io_resp_valid_T_77 | _io_resp_valid_T_51; // @[Mux.scala:30:73]
wire [2:0] _io_resp_valid_T_79 = _io_resp_valid_T_78 | _io_resp_valid_T_52; // @[Mux.scala:30:73]
wire [2:0] _io_resp_valid_T_80 = _io_resp_valid_T_79 | _io_resp_valid_T_53; // @[Mux.scala:30:73]
wire [2:0] _io_resp_valid_T_81 = _io_resp_valid_T_80 | _io_resp_valid_T_54; // @[Mux.scala:30:73]
wire [2:0] _io_resp_valid_T_82 = _io_resp_valid_T_81 | _io_resp_valid_T_55; // @[Mux.scala:30:73]
wire [2:0] _io_resp_valid_T_83 = _io_resp_valid_T_82 | _io_resp_valid_T_56; // @[Mux.scala:30:73]
wire [2:0] _io_resp_valid_WIRE = _io_resp_valid_T_83; // @[Mux.scala:30:73]
wire [6:0] _io_resp_valid_T_84 = _io_resp_valid_T >> _io_resp_valid_WIRE; // @[Mux.scala:30:73]
assign _io_resp_valid_T_85 = _io_resp_valid_T_84[0]; // @[BTB.scala:287:34]
assign io_resp_valid_0 = _io_resp_valid_T_85; // @[BTB.scala:187:7, :287:34]
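// NOTE (annotation): io_resp_valid indexes {pageHit, 1'b0} by the page number of
// the hitting entry: Mux1H(idxHit, idxPages) picks that page, and the shift plus
// bit-0 extract above checks that the page is still resident and valid.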
wire [2:0] _io_resp_bits_target_T_28 = _io_resp_bits_target_T ? tgtPages_0 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_bits_target_T_29 = _io_resp_bits_target_T_1 ? tgtPages_1 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_bits_target_T_30 = _io_resp_bits_target_T_2 ? tgtPages_2 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_bits_target_T_31 = _io_resp_bits_target_T_3 ? tgtPages_3 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_bits_target_T_32 = _io_resp_bits_target_T_4 ? tgtPages_4 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_bits_target_T_33 = _io_resp_bits_target_T_5 ? tgtPages_5 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_bits_target_T_34 = _io_resp_bits_target_T_6 ? tgtPages_6 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_bits_target_T_35 = _io_resp_bits_target_T_7 ? tgtPages_7 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_bits_target_T_36 = _io_resp_bits_target_T_8 ? tgtPages_8 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_bits_target_T_37 = _io_resp_bits_target_T_9 ? tgtPages_9 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_bits_target_T_38 = _io_resp_bits_target_T_10 ? tgtPages_10 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_bits_target_T_39 = _io_resp_bits_target_T_11 ? tgtPages_11 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_bits_target_T_40 = _io_resp_bits_target_T_12 ? tgtPages_12 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_bits_target_T_41 = _io_resp_bits_target_T_13 ? tgtPages_13 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_bits_target_T_42 = _io_resp_bits_target_T_14 ? tgtPages_14 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_bits_target_T_43 = _io_resp_bits_target_T_15 ? tgtPages_15 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_bits_target_T_44 = _io_resp_bits_target_T_16 ? tgtPages_16 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_bits_target_T_45 = _io_resp_bits_target_T_17 ? tgtPages_17 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_bits_target_T_46 = _io_resp_bits_target_T_18 ? tgtPages_18 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_bits_target_T_47 = _io_resp_bits_target_T_19 ? tgtPages_19 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_bits_target_T_48 = _io_resp_bits_target_T_20 ? tgtPages_20 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_bits_target_T_49 = _io_resp_bits_target_T_21 ? tgtPages_21 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_bits_target_T_50 = _io_resp_bits_target_T_22 ? tgtPages_22 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_bits_target_T_51 = _io_resp_bits_target_T_23 ? tgtPages_23 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_bits_target_T_52 = _io_resp_bits_target_T_24 ? tgtPages_24 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_bits_target_T_53 = _io_resp_bits_target_T_25 ? tgtPages_25 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_bits_target_T_54 = _io_resp_bits_target_T_26 ? tgtPages_26 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_bits_target_T_55 = _io_resp_bits_target_T_27 ? tgtPages_27 : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _io_resp_bits_target_T_56 = _io_resp_bits_target_T_28 | _io_resp_bits_target_T_29; // @[Mux.scala:30:73]
wire [2:0] _io_resp_bits_target_T_57 = _io_resp_bits_target_T_56 | _io_resp_bits_target_T_30; // @[Mux.scala:30:73]
wire [2:0] _io_resp_bits_target_T_58 = _io_resp_bits_target_T_57 | _io_resp_bits_target_T_31; // @[Mux.scala:30:73]
wire [2:0] _io_resp_bits_target_T_59 = _io_resp_bits_target_T_58 | _io_resp_bits_target_T_32; // @[Mux.scala:30:73]
wire [2:0] _io_resp_bits_target_T_60 = _io_resp_bits_target_T_59 | _io_resp_bits_target_T_33; // @[Mux.scala:30:73]
wire [2:0] _io_resp_bits_target_T_61 = _io_resp_bits_target_T_60 | _io_resp_bits_target_T_34; // @[Mux.scala:30:73]
wire [2:0] _io_resp_bits_target_T_62 = _io_resp_bits_target_T_61 | _io_resp_bits_target_T_35; // @[Mux.scala:30:73]
wire [2:0] _io_resp_bits_target_T_63 = _io_resp_bits_target_T_62 | _io_resp_bits_target_T_36; // @[Mux.scala:30:73]
wire [2:0] _io_resp_bits_target_T_64 = _io_resp_bits_target_T_63 | _io_resp_bits_target_T_37; // @[Mux.scala:30:73]
wire [2:0] _io_resp_bits_target_T_65 = _io_resp_bits_target_T_64 | _io_resp_bits_target_T_38; // @[Mux.scala:30:73]
wire [2:0] _io_resp_bits_target_T_66 = _io_resp_bits_target_T_65 | _io_resp_bits_target_T_39; // @[Mux.scala:30:73]
wire [2:0] _io_resp_bits_target_T_67 = _io_resp_bits_target_T_66 | _io_resp_bits_target_T_40; // @[Mux.scala:30:73]
wire [2:0] _io_resp_bits_target_T_68 = _io_resp_bits_target_T_67 | _io_resp_bits_target_T_41; // @[Mux.scala:30:73]
wire [2:0] _io_resp_bits_target_T_69 = _io_resp_bits_target_T_68 | _io_resp_bits_target_T_42; // @[Mux.scala:30:73]
wire [2:0] _io_resp_bits_target_T_70 = _io_resp_bits_target_T_69 | _io_resp_bits_target_T_43; // @[Mux.scala:30:73]
wire [2:0] _io_resp_bits_target_T_71 = _io_resp_bits_target_T_70 | _io_resp_bits_target_T_44; // @[Mux.scala:30:73]
wire [2:0] _io_resp_bits_target_T_72 = _io_resp_bits_target_T_71 | _io_resp_bits_target_T_45; // @[Mux.scala:30:73]
wire [2:0] _io_resp_bits_target_T_73 = _io_resp_bits_target_T_72 | _io_resp_bits_target_T_46; // @[Mux.scala:30:73]
wire [2:0] _io_resp_bits_target_T_74 = _io_resp_bits_target_T_73 | _io_resp_bits_target_T_47; // @[Mux.scala:30:73]
wire [2:0] _io_resp_bits_target_T_75 = _io_resp_bits_target_T_74 | _io_resp_bits_target_T_48; // @[Mux.scala:30:73]
wire [2:0] _io_resp_bits_target_T_76 = _io_resp_bits_target_T_75 | _io_resp_bits_target_T_49; // @[Mux.scala:30:73]
wire [2:0] _io_resp_bits_target_T_77 = _io_resp_bits_target_T_76 | _io_resp_bits_target_T_50; // @[Mux.scala:30:73]
wire [2:0] _io_resp_bits_target_T_78 = _io_resp_bits_target_T_77 | _io_resp_bits_target_T_51; // @[Mux.scala:30:73]
wire [2:0] _io_resp_bits_target_T_79 = _io_resp_bits_target_T_78 | _io_resp_bits_target_T_52; // @[Mux.scala:30:73]
wire [2:0] _io_resp_bits_target_T_80 = _io_resp_bits_target_T_79 | _io_resp_bits_target_T_53; // @[Mux.scala:30:73]
wire [2:0] _io_resp_bits_target_T_81 = _io_resp_bits_target_T_80 | _io_resp_bits_target_T_54; // @[Mux.scala:30:73]
wire [2:0] _io_resp_bits_target_T_82 = _io_resp_bits_target_T_81 | _io_resp_bits_target_T_55; // @[Mux.scala:30:73]
wire [2:0] _io_resp_bits_target_WIRE = _io_resp_bits_target_T_82; // @[Mux.scala:30:73]
wire _io_resp_bits_target_T_83 = _io_resp_bits_target_WIRE == 3'h1; // @[Mux.scala:30:73]
wire [24:0] _io_resp_bits_target_T_84 = _io_resp_bits_target_T_83 ? pagesMasked_1 : pagesMasked_0; // @[package.scala:39:{76,86}]
wire _io_resp_bits_target_T_85 = _io_resp_bits_target_WIRE == 3'h2; // @[Mux.scala:30:73]
wire [24:0] _io_resp_bits_target_T_86 = _io_resp_bits_target_T_85 ? pagesMasked_2 : _io_resp_bits_target_T_84; // @[package.scala:39:{76,86}]
wire _io_resp_bits_target_T_87 = _io_resp_bits_target_WIRE == 3'h3; // @[Mux.scala:30:73]
wire [24:0] _io_resp_bits_target_T_88 = _io_resp_bits_target_T_87 ? pagesMasked_3 : _io_resp_bits_target_T_86; // @[package.scala:39:{76,86}]
wire _io_resp_bits_target_T_89 = _io_resp_bits_target_WIRE == 3'h4; // @[Mux.scala:30:73]
wire [24:0] _io_resp_bits_target_T_90 = _io_resp_bits_target_T_89 ? pagesMasked_4 : _io_resp_bits_target_T_88; // @[package.scala:39:{76,86}]
wire _io_resp_bits_target_T_91 = _io_resp_bits_target_WIRE == 3'h5; // @[Mux.scala:30:73]
wire [24:0] _io_resp_bits_target_T_92 = _io_resp_bits_target_T_91 ? pagesMasked_5 : _io_resp_bits_target_T_90; // @[package.scala:39:{76,86}]
wire _io_resp_bits_target_T_93 = _io_resp_bits_target_WIRE == 3'h6; // @[Mux.scala:30:73]
wire [24:0] _io_resp_bits_target_T_94 = _io_resp_bits_target_T_93 ? pagesMasked_4 : _io_resp_bits_target_T_92; // @[package.scala:39:{76,86}]
wire _io_resp_bits_target_T_95 = &_io_resp_bits_target_WIRE; // @[Mux.scala:30:73]
wire [24:0] _io_resp_bits_target_T_96 = _io_resp_bits_target_T_95 ? pagesMasked_5 : _io_resp_bits_target_T_94; // @[package.scala:39:{76,86}]
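// NOTE (annotation): _io_resp_bits_target_T_96 selects the masked upper page bits
// (pagesMasked_*) addressed by the hitting entry's target page,
// Mux1H(idxHit, tgtPages); the Mux1H over tgts that follows supplies the
// per-entry low-order target bits.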
wire [12:0] _io_resp_bits_target_T_125 = _io_resp_bits_target_T_97 ? tgts_0 : 13'h0; // @[Mux.scala:30:73, :32:36]
wire [12:0] _io_resp_bits_target_T_126 = _io_resp_bits_target_T_98 ? tgts_1 : 13'h0; // @[Mux.scala:30:73, :32:36]
wire [12:0] _io_resp_bits_target_T_127 = _io_resp_bits_target_T_99 ? tgts_2 : 13'h0; // @[Mux.scala:30:73, :32:36]
wire [12:0] _io_resp_bits_target_T_128 = _io_resp_bits_target_T_100 ? tgts_3 : 13'h0; // @[Mux.scala:30:73, :32:36]
wire [12:0] _io_resp_bits_target_T_129 = _io_resp_bits_target_T_101 ? tgts_4 : 13'h0; // @[Mux.scala:30:73, :32:36]
wire [12:0] _io_resp_bits_target_T_130 = _io_resp_bits_target_T_102 ? tgts_5 : 13'h0; // @[Mux.scala:30:73, :32:36]
wire [12:0] _io_resp_bits_target_T_131 = _io_resp_bits_target_T_103 ? tgts_6 : 13'h0; // @[Mux.scala:30:73, :32:36]
wire [12:0] _io_resp_bits_target_T_132 = _io_resp_bits_target_T_104 ? tgts_7 : 13'h0; // @[Mux.scala:30:73, :32:36]
wire [12:0] _io_resp_bits_target_T_133 = _io_resp_bits_target_T_105 ? tgts_8 : 13'h0; // @[Mux.scala:30:73, :32:36]
wire [12:0] _io_resp_bits_target_T_134 = _io_resp_bits_target_T_106 ? tgts_9 : 13'h0; // @[Mux.scala:30:73, :32:36]
wire [12:0] _io_resp_bits_target_T_135 = _io_resp_bits_target_T_107 ? tgts_10 : 13'h0; // @[Mux.scala:30:73, :32:36]
wire [12:0] _io_resp_bits_target_T_136 = _io_resp_bits_target_T_108 ? tgts_11 : 13'h0; // @[Mux.scala:30:73, :32:36]
wire [12:0] _io_resp_bits_target_T_137 = _io_resp_bits_target_T_109 ? tgts_12 : 13'h0; // @[Mux.scala:30:73, :32:36]
wire [12:0] _io_resp_bits_target_T_138 = _io_resp_bits_target_T_110 ? tgts_13 : 13'h0; // @[Mux.scala:30:73, :32:36]
wire [12:0] _io_resp_bits_target_T_139 = _io_resp_bits_target_T_111 ? tgts_14 : 13'h0; // @[Mux.scala:30:73, :32:36]
wire [12:0] _io_resp_bits_target_T_140 = _io_resp_bits_target_T_112 ? tgts_15 : 13'h0; // @[Mux.scala:30:73, :32:36]
wire [12:0] _io_resp_bits_target_T_141 = _io_resp_bits_target_T_113 ? tgts_16 : 13'h0; // @[Mux.scala:30:73, :32:36]
wire [12:0] _io_resp_bits_target_T_142 = _io_resp_bits_target_T_114 ? tgts_17 : 13'h0; // @[Mux.scala:30:73, :32:36]
wire [12:0] _io_resp_bits_target_T_143 = _io_resp_bits_target_T_115 ? tgts_18 : 13'h0; // @[Mux.scala:30:73, :32:36]
wire [12:0] _io_resp_bits_target_T_144 = _io_resp_bits_target_T_116 ? tgts_19 : 13'h0; // @[Mux.scala:30:73, :32:36]
wire [12:0] _io_resp_bits_target_T_145 = _io_resp_bits_target_T_117 ? tgts_20 : 13'h0; // @[Mux.scala:30:73, :32:36]
wire [12:0] _io_resp_bits_target_T_146 = _io_resp_bits_target_T_118 ? tgts_21 : 13'h0; // @[Mux.scala:30:73, :32:36]
wire [12:0] _io_resp_bits_target_T_147 = _io_resp_bits_target_T_119 ? tgts_22 : 13'h0; // @[Mux.scala:30:73, :32:36]
wire [12:0] _io_resp_bits_target_T_148 = _io_resp_bits_target_T_120 ? tgts_23 : 13'h0; // @[Mux.scala:30:73, :32:36]
wire [12:0] _io_resp_bits_target_T_149 = _io_resp_bits_target_T_121 ? tgts_24 : 13'h0; // @[Mux.scala:30:73, :32:36]
wire [12:0] _io_resp_bits_target_T_150 = _io_resp_bits_target_T_122 ? tgts_25 : 13'h0; // @[Mux.scala:30:73, :32:36]
wire [12:0] _io_resp_bits_target_T_151 = _io_resp_bits_target_T_123 ? tgts_26 : 13'h0; // @[Mux.scala:30:73, :32:36]
wire [12:0] _io_resp_bits_target_T_152 = _io_resp_bits_target_T_124 ? tgts_27 : 13'h0; // @[Mux.scala:30:73, :32:36]
wire [12:0] _io_resp_bits_target_T_153 = _io_resp_bits_target_T_125 | _io_resp_bits_target_T_126; // @[Mux.scala:30:73]
wire [12:0] _io_resp_bits_target_T_154 = _io_resp_bits_target_T_153 | _io_resp_bits_target_T_127; // @[Mux.scala:30:73]
wire [12:0] _io_resp_bits_target_T_155 = _io_resp_bits_target_T_154 | _io_resp_bits_target_T_128; // @[Mux.scala:30:73]
wire [12:0] _io_resp_bits_target_T_156 = _io_resp_bits_target_T_155 | _io_resp_bits_target_T_129; // @[Mux.scala:30:73]
wire [12:0] _io_resp_bits_target_T_157 = _io_resp_bits_target_T_156 | _io_resp_bits_target_T_130; // @[Mux.scala:30:73]
wire [12:0] _io_resp_bits_target_T_158 = _io_resp_bits_target_T_157 | _io_resp_bits_target_T_131; // @[Mux.scala:30:73]
wire [12:0] _io_resp_bits_target_T_159 = _io_resp_bits_target_T_158 | _io_resp_bits_target_T_132; // @[Mux.scala:30:73]
wire [12:0] _io_resp_bits_target_T_160 = _io_resp_bits_target_T_159 | _io_resp_bits_target_T_133; // @[Mux.scala:30:73]
wire [12:0] _io_resp_bits_target_T_161 = _io_resp_bits_target_T_160 | _io_resp_bits_target_T_134; // @[Mux.scala:30:73]
wire [12:0] _io_resp_bits_target_T_162 = _io_resp_bits_target_T_161 | _io_resp_bits_target_T_135; // @[Mux.scala:30:73]
wire [12:0] _io_resp_bits_target_T_163 = _io_resp_bits_target_T_162 | _io_resp_bits_target_T_136; // @[Mux.scala:30:73]
wire [12:0] _io_resp_bits_target_T_164 = _io_resp_bits_target_T_163 | _io_resp_bits_target_T_137; // @[Mux.scala:30:73]
wire [12:0] _io_resp_bits_target_T_165 = _io_resp_bits_target_T_164 | _io_resp_bits_target_T_138; // @[Mux.scala:30:73]
wire [12:0] _io_resp_bits_target_T_166 = _io_resp_bits_target_T_165 | _io_resp_bits_target_T_139; // @[Mux.scala:30:73]
wire [12:0] _io_resp_bits_target_T_167 = _io_resp_bits_target_T_166 | _io_resp_bits_target_T_140; // @[Mux.scala:30:73]
wire [12:0] _io_resp_bits_target_T_168 = _io_resp_bits_target_T_167 | _io_resp_bits_target_T_141; // @[Mux.scala:30:73]
wire [12:0] _io_resp_bits_target_T_169 = _io_resp_bits_target_T_168 | _io_resp_bits_target_T_142; // @[Mux.scala:30:73]
wire [12:0] _io_resp_bits_target_T_170 = _io_resp_bits_target_T_169 | _io_resp_bits_target_T_143; // @[Mux.scala:30:73]
wire [12:0] _io_resp_bits_target_T_171 = _io_resp_bits_target_T_170 | _io_resp_bits_target_T_144; // @[Mux.scala:30:73]
wire [12:0] _io_resp_bits_target_T_172 = _io_resp_bits_target_T_171 | _io_resp_bits_target_T_145; // @[Mux.scala:30:73]
wire [12:0] _io_resp_bits_target_T_173 = _io_resp_bits_target_T_172 | _io_resp_bits_target_T_146; // @[Mux.scala:30:73]
wire [12:0] _io_resp_bits_target_T_174 = _io_resp_bits_target_T_173 | _io_resp_bits_target_T_147; // @[Mux.scala:30:73]
wire [12:0] _io_resp_bits_target_T_175 = _io_resp_bits_target_T_174 | _io_resp_bits_target_T_148; // @[Mux.scala:30:73]
wire [12:0] _io_resp_bits_target_T_176 = _io_resp_bits_target_T_175 | _io_resp_bits_target_T_149; // @[Mux.scala:30:73]
wire [12:0] _io_resp_bits_target_T_177 = _io_resp_bits_target_T_176 | _io_resp_bits_target_T_150; // @[Mux.scala:30:73]
wire [12:0] _io_resp_bits_target_T_178 = _io_resp_bits_target_T_177 | _io_resp_bits_target_T_151; // @[Mux.scala:30:73]
wire [12:0] _io_resp_bits_target_T_179 = _io_resp_bits_target_T_178 | _io_resp_bits_target_T_152; // @[Mux.scala:30:73]
wire [12:0] _io_resp_bits_target_WIRE_1 = _io_resp_bits_target_T_179; // @[Mux.scala:30:73]
wire [13:0] _io_resp_bits_target_T_180 = {_io_resp_bits_target_WIRE_1, 1'h0}; // @[Mux.scala:30:73]
wire [38:0] _io_resp_bits_target_T_181 = {_io_resp_bits_target_T_96, _io_resp_bits_target_T_180}; // @[package.scala:39:76]
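// Binary-encode the one-hot idxHit vector (OneHot priority encoder) into the entry index reported in io_resp_bits_entry.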
wire [11:0] io_resp_bits_entry_hi = idxHit[27:16]; // @[OneHot.scala:30:18]
wire [15:0] io_resp_bits_entry_lo = idxHit[15:0]; // @[OneHot.scala:31:18]
wire _io_resp_bits_entry_T = |io_resp_bits_entry_hi; // @[OneHot.scala:30:18, :32:14]
wire [15:0] _io_resp_bits_entry_T_1 = {4'h0, io_resp_bits_entry_hi} | io_resp_bits_entry_lo; // @[OneHot.scala:30:18, :31:18, :32:{14,28}]
wire [7:0] io_resp_bits_entry_hi_1 = _io_resp_bits_entry_T_1[15:8]; // @[OneHot.scala:30:18, :32:28]
wire [7:0] io_resp_bits_entry_lo_1 = _io_resp_bits_entry_T_1[7:0]; // @[OneHot.scala:31:18, :32:28]
wire _io_resp_bits_entry_T_2 = |io_resp_bits_entry_hi_1; // @[OneHot.scala:30:18, :32:14]
wire [7:0] _io_resp_bits_entry_T_3 = io_resp_bits_entry_hi_1 | io_resp_bits_entry_lo_1; // @[OneHot.scala:30:18, :31:18, :32:28]
wire [3:0] io_resp_bits_entry_hi_2 = _io_resp_bits_entry_T_3[7:4]; // @[OneHot.scala:30:18, :32:28]
wire [3:0] io_resp_bits_entry_lo_2 = _io_resp_bits_entry_T_3[3:0]; // @[OneHot.scala:31:18, :32:28]
wire _io_resp_bits_entry_T_4 = |io_resp_bits_entry_hi_2; // @[OneHot.scala:30:18, :32:14]
wire [3:0] _io_resp_bits_entry_T_5 = io_resp_bits_entry_hi_2 | io_resp_bits_entry_lo_2; // @[OneHot.scala:30:18, :31:18, :32:28]
wire [1:0] io_resp_bits_entry_hi_3 = _io_resp_bits_entry_T_5[3:2]; // @[OneHot.scala:30:18, :32:28]
wire [1:0] io_resp_bits_entry_lo_3 = _io_resp_bits_entry_T_5[1:0]; // @[OneHot.scala:31:18, :32:28]
wire _io_resp_bits_entry_T_6 = |io_resp_bits_entry_hi_3; // @[OneHot.scala:30:18, :32:14]
wire [1:0] _io_resp_bits_entry_T_7 = io_resp_bits_entry_hi_3 | io_resp_bits_entry_lo_3; // @[OneHot.scala:30:18, :31:18, :32:28]
wire _io_resp_bits_entry_T_8 = _io_resp_bits_entry_T_7[1]; // @[OneHot.scala:32:28]
wire [1:0] _io_resp_bits_entry_T_9 = {_io_resp_bits_entry_T_6, _io_resp_bits_entry_T_8}; // @[OneHot.scala:32:{10,14}]
wire [2:0] _io_resp_bits_entry_T_10 = {_io_resp_bits_entry_T_4, _io_resp_bits_entry_T_9}; // @[OneHot.scala:32:{10,14}]
wire [3:0] _io_resp_bits_entry_T_11 = {_io_resp_bits_entry_T_2, _io_resp_bits_entry_T_10}; // @[OneHot.scala:32:{10,14}]
assign _io_resp_bits_entry_T_12 = {_io_resp_bits_entry_T, _io_resp_bits_entry_T_11}; // @[OneHot.scala:32:{10,14}]
assign io_resp_bits_entry_0 = _io_resp_bits_entry_T_12; // @[OneHot.scala:32:10]
wire _io_resp_bits_bridx_T_28 = _io_resp_bits_bridx_T & brIdx_0; // @[Mux.scala:30:73, :32:36]
wire _io_resp_bits_bridx_T_29 = _io_resp_bits_bridx_T_1 & brIdx_1; // @[Mux.scala:30:73, :32:36]
wire _io_resp_bits_bridx_T_30 = _io_resp_bits_bridx_T_2 & brIdx_2; // @[Mux.scala:30:73, :32:36]
wire _io_resp_bits_bridx_T_31 = _io_resp_bits_bridx_T_3 & brIdx_3; // @[Mux.scala:30:73, :32:36]
wire _io_resp_bits_bridx_T_32 = _io_resp_bits_bridx_T_4 & brIdx_4; // @[Mux.scala:30:73, :32:36]
wire _io_resp_bits_bridx_T_33 = _io_resp_bits_bridx_T_5 & brIdx_5; // @[Mux.scala:30:73, :32:36]
wire _io_resp_bits_bridx_T_34 = _io_resp_bits_bridx_T_6 & brIdx_6; // @[Mux.scala:30:73, :32:36]
wire _io_resp_bits_bridx_T_35 = _io_resp_bits_bridx_T_7 & brIdx_7; // @[Mux.scala:30:73, :32:36]
wire _io_resp_bits_bridx_T_36 = _io_resp_bits_bridx_T_8 & brIdx_8; // @[Mux.scala:30:73, :32:36]
wire _io_resp_bits_bridx_T_37 = _io_resp_bits_bridx_T_9 & brIdx_9; // @[Mux.scala:30:73, :32:36]
wire _io_resp_bits_bridx_T_38 = _io_resp_bits_bridx_T_10 & brIdx_10; // @[Mux.scala:30:73, :32:36]
wire _io_resp_bits_bridx_T_39 = _io_resp_bits_bridx_T_11 & brIdx_11; // @[Mux.scala:30:73, :32:36]
wire _io_resp_bits_bridx_T_40 = _io_resp_bits_bridx_T_12 & brIdx_12; // @[Mux.scala:30:73, :32:36]
wire _io_resp_bits_bridx_T_41 = _io_resp_bits_bridx_T_13 & brIdx_13; // @[Mux.scala:30:73, :32:36]
wire _io_resp_bits_bridx_T_42 = _io_resp_bits_bridx_T_14 & brIdx_14; // @[Mux.scala:30:73, :32:36]
wire _io_resp_bits_bridx_T_43 = _io_resp_bits_bridx_T_15 & brIdx_15; // @[Mux.scala:30:73, :32:36]
wire _io_resp_bits_bridx_T_44 = _io_resp_bits_bridx_T_16 & brIdx_16; // @[Mux.scala:30:73, :32:36]
wire _io_resp_bits_bridx_T_45 = _io_resp_bits_bridx_T_17 & brIdx_17; // @[Mux.scala:30:73, :32:36]
wire _io_resp_bits_bridx_T_46 = _io_resp_bits_bridx_T_18 & brIdx_18; // @[Mux.scala:30:73, :32:36]
wire _io_resp_bits_bridx_T_47 = _io_resp_bits_bridx_T_19 & brIdx_19; // @[Mux.scala:30:73, :32:36]
wire _io_resp_bits_bridx_T_48 = _io_resp_bits_bridx_T_20 & brIdx_20; // @[Mux.scala:30:73, :32:36]
wire _io_resp_bits_bridx_T_49 = _io_resp_bits_bridx_T_21 & brIdx_21; // @[Mux.scala:30:73, :32:36]
wire _io_resp_bits_bridx_T_50 = _io_resp_bits_bridx_T_22 & brIdx_22; // @[Mux.scala:30:73, :32:36]
wire _io_resp_bits_bridx_T_51 = _io_resp_bits_bridx_T_23 & brIdx_23; // @[Mux.scala:30:73, :32:36]
wire _io_resp_bits_bridx_T_52 = _io_resp_bits_bridx_T_24 & brIdx_24; // @[Mux.scala:30:73, :32:36]
wire _io_resp_bits_bridx_T_53 = _io_resp_bits_bridx_T_25 & brIdx_25; // @[Mux.scala:30:73, :32:36]
wire _io_resp_bits_bridx_T_54 = _io_resp_bits_bridx_T_26 & brIdx_26; // @[Mux.scala:30:73, :32:36]
wire _io_resp_bits_bridx_T_55 = _io_resp_bits_bridx_T_27 & brIdx_27; // @[Mux.scala:30:73, :32:36]
wire _io_resp_bits_bridx_T_56 = _io_resp_bits_bridx_T_28 | _io_resp_bits_bridx_T_29; // @[Mux.scala:30:73]
wire _io_resp_bits_bridx_T_57 = _io_resp_bits_bridx_T_56 | _io_resp_bits_bridx_T_30; // @[Mux.scala:30:73]
wire _io_resp_bits_bridx_T_58 = _io_resp_bits_bridx_T_57 | _io_resp_bits_bridx_T_31; // @[Mux.scala:30:73]
wire _io_resp_bits_bridx_T_59 = _io_resp_bits_bridx_T_58 | _io_resp_bits_bridx_T_32; // @[Mux.scala:30:73]
wire _io_resp_bits_bridx_T_60 = _io_resp_bits_bridx_T_59 | _io_resp_bits_bridx_T_33; // @[Mux.scala:30:73]
wire _io_resp_bits_bridx_T_61 = _io_resp_bits_bridx_T_60 | _io_resp_bits_bridx_T_34; // @[Mux.scala:30:73]
wire _io_resp_bits_bridx_T_62 = _io_resp_bits_bridx_T_61 | _io_resp_bits_bridx_T_35; // @[Mux.scala:30:73]
wire _io_resp_bits_bridx_T_63 = _io_resp_bits_bridx_T_62 | _io_resp_bits_bridx_T_36; // @[Mux.scala:30:73]
wire _io_resp_bits_bridx_T_64 = _io_resp_bits_bridx_T_63 | _io_resp_bits_bridx_T_37; // @[Mux.scala:30:73]
wire _io_resp_bits_bridx_T_65 = _io_resp_bits_bridx_T_64 | _io_resp_bits_bridx_T_38; // @[Mux.scala:30:73]
wire _io_resp_bits_bridx_T_66 = _io_resp_bits_bridx_T_65 | _io_resp_bits_bridx_T_39; // @[Mux.scala:30:73]
wire _io_resp_bits_bridx_T_67 = _io_resp_bits_bridx_T_66 | _io_resp_bits_bridx_T_40; // @[Mux.scala:30:73]
wire _io_resp_bits_bridx_T_68 = _io_resp_bits_bridx_T_67 | _io_resp_bits_bridx_T_41; // @[Mux.scala:30:73]
wire _io_resp_bits_bridx_T_69 = _io_resp_bits_bridx_T_68 | _io_resp_bits_bridx_T_42; // @[Mux.scala:30:73]
wire _io_resp_bits_bridx_T_70 = _io_resp_bits_bridx_T_69 | _io_resp_bits_bridx_T_43; // @[Mux.scala:30:73]
wire _io_resp_bits_bridx_T_71 = _io_resp_bits_bridx_T_70 | _io_resp_bits_bridx_T_44; // @[Mux.scala:30:73]
wire _io_resp_bits_bridx_T_72 = _io_resp_bits_bridx_T_71 | _io_resp_bits_bridx_T_45; // @[Mux.scala:30:73]
wire _io_resp_bits_bridx_T_73 = _io_resp_bits_bridx_T_72 | _io_resp_bits_bridx_T_46; // @[Mux.scala:30:73]
wire _io_resp_bits_bridx_T_74 = _io_resp_bits_bridx_T_73 | _io_resp_bits_bridx_T_47; // @[Mux.scala:30:73]
wire _io_resp_bits_bridx_T_75 = _io_resp_bits_bridx_T_74 | _io_resp_bits_bridx_T_48; // @[Mux.scala:30:73]
wire _io_resp_bits_bridx_T_76 = _io_resp_bits_bridx_T_75 | _io_resp_bits_bridx_T_49; // @[Mux.scala:30:73]
wire _io_resp_bits_bridx_T_77 = _io_resp_bits_bridx_T_76 | _io_resp_bits_bridx_T_50; // @[Mux.scala:30:73]
wire _io_resp_bits_bridx_T_78 = _io_resp_bits_bridx_T_77 | _io_resp_bits_bridx_T_51; // @[Mux.scala:30:73]
wire _io_resp_bits_bridx_T_79 = _io_resp_bits_bridx_T_78 | _io_resp_bits_bridx_T_52; // @[Mux.scala:30:73]
wire _io_resp_bits_bridx_T_80 = _io_resp_bits_bridx_T_79 | _io_resp_bits_bridx_T_53; // @[Mux.scala:30:73]
wire _io_resp_bits_bridx_T_81 = _io_resp_bits_bridx_T_80 | _io_resp_bits_bridx_T_54; // @[Mux.scala:30:73]
wire _io_resp_bits_bridx_T_82 = _io_resp_bits_bridx_T_81 | _io_resp_bits_bridx_T_55; // @[Mux.scala:30:73]
assign _io_resp_bits_bridx_WIRE = _io_resp_bits_bridx_T_82; // @[Mux.scala:30:73]
assign io_resp_bits_bridx_0 = _io_resp_bits_bridx_WIRE; // @[Mux.scala:30:73]
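// Fetch-packet valid mask: when the prediction is taken, only the slots up to and including the predicted branch (bridx) remain enabled.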
wire _io_resp_bits_mask_T = ~io_resp_bits_bridx_0; // @[BTB.scala:187:7, :292:61]
wire _io_resp_bits_mask_T_1 = io_resp_bits_taken_0 & _io_resp_bits_mask_T; // @[BTB.scala:187:7, :292:{40,61}]
wire _io_resp_bits_mask_T_2 = ~_io_resp_bits_mask_T_1; // @[BTB.scala:292:{36,40}]
wire [1:0] _io_resp_bits_mask_T_3 = 2'h1 << _io_resp_bits_mask_T_2; // @[BTB.scala:292:{33,36}]
wire [2:0] _io_resp_bits_mask_T_4 = {1'h0, _io_resp_bits_mask_T_3} - 3'h1; // @[BTB.scala:292:{33,87}]
wire [1:0] _io_resp_bits_mask_T_5 = _io_resp_bits_mask_T_4[1:0]; // @[BTB.scala:292:87]
wire [2:0] _io_resp_bits_mask_T_6 = {_io_resp_bits_mask_T_5, 1'h1}; // @[BTB.scala:187:7, :292:{27,87}]
assign io_resp_bits_mask_0 = _io_resp_bits_mask_T_6[1:0]; // @[BTB.scala:187:7, :292:{21,27}]
wire [1:0] _io_resp_bits_cfiType_T_28 = _io_resp_bits_cfiType_T ? cfiType_0 : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _io_resp_bits_cfiType_T_29 = _io_resp_bits_cfiType_T_1 ? cfiType_1 : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _io_resp_bits_cfiType_T_30 = _io_resp_bits_cfiType_T_2 ? cfiType_2 : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _io_resp_bits_cfiType_T_31 = _io_resp_bits_cfiType_T_3 ? cfiType_3 : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _io_resp_bits_cfiType_T_32 = _io_resp_bits_cfiType_T_4 ? cfiType_4 : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _io_resp_bits_cfiType_T_33 = _io_resp_bits_cfiType_T_5 ? cfiType_5 : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _io_resp_bits_cfiType_T_34 = _io_resp_bits_cfiType_T_6 ? cfiType_6 : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _io_resp_bits_cfiType_T_35 = _io_resp_bits_cfiType_T_7 ? cfiType_7 : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _io_resp_bits_cfiType_T_36 = _io_resp_bits_cfiType_T_8 ? cfiType_8 : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _io_resp_bits_cfiType_T_37 = _io_resp_bits_cfiType_T_9 ? cfiType_9 : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _io_resp_bits_cfiType_T_38 = _io_resp_bits_cfiType_T_10 ? cfiType_10 : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _io_resp_bits_cfiType_T_39 = _io_resp_bits_cfiType_T_11 ? cfiType_11 : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _io_resp_bits_cfiType_T_40 = _io_resp_bits_cfiType_T_12 ? cfiType_12 : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _io_resp_bits_cfiType_T_41 = _io_resp_bits_cfiType_T_13 ? cfiType_13 : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _io_resp_bits_cfiType_T_42 = _io_resp_bits_cfiType_T_14 ? cfiType_14 : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _io_resp_bits_cfiType_T_43 = _io_resp_bits_cfiType_T_15 ? cfiType_15 : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _io_resp_bits_cfiType_T_44 = _io_resp_bits_cfiType_T_16 ? cfiType_16 : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _io_resp_bits_cfiType_T_45 = _io_resp_bits_cfiType_T_17 ? cfiType_17 : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _io_resp_bits_cfiType_T_46 = _io_resp_bits_cfiType_T_18 ? cfiType_18 : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _io_resp_bits_cfiType_T_47 = _io_resp_bits_cfiType_T_19 ? cfiType_19 : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _io_resp_bits_cfiType_T_48 = _io_resp_bits_cfiType_T_20 ? cfiType_20 : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _io_resp_bits_cfiType_T_49 = _io_resp_bits_cfiType_T_21 ? cfiType_21 : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _io_resp_bits_cfiType_T_50 = _io_resp_bits_cfiType_T_22 ? cfiType_22 : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _io_resp_bits_cfiType_T_51 = _io_resp_bits_cfiType_T_23 ? cfiType_23 : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _io_resp_bits_cfiType_T_52 = _io_resp_bits_cfiType_T_24 ? cfiType_24 : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _io_resp_bits_cfiType_T_53 = _io_resp_bits_cfiType_T_25 ? cfiType_25 : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _io_resp_bits_cfiType_T_54 = _io_resp_bits_cfiType_T_26 ? cfiType_26 : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _io_resp_bits_cfiType_T_55 = _io_resp_bits_cfiType_T_27 ? cfiType_27 : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _io_resp_bits_cfiType_T_56 = _io_resp_bits_cfiType_T_28 | _io_resp_bits_cfiType_T_29; // @[Mux.scala:30:73]
wire [1:0] _io_resp_bits_cfiType_T_57 = _io_resp_bits_cfiType_T_56 | _io_resp_bits_cfiType_T_30; // @[Mux.scala:30:73]
wire [1:0] _io_resp_bits_cfiType_T_58 = _io_resp_bits_cfiType_T_57 | _io_resp_bits_cfiType_T_31; // @[Mux.scala:30:73]
wire [1:0] _io_resp_bits_cfiType_T_59 = _io_resp_bits_cfiType_T_58 | _io_resp_bits_cfiType_T_32; // @[Mux.scala:30:73]
wire [1:0] _io_resp_bits_cfiType_T_60 = _io_resp_bits_cfiType_T_59 | _io_resp_bits_cfiType_T_33; // @[Mux.scala:30:73]
wire [1:0] _io_resp_bits_cfiType_T_61 = _io_resp_bits_cfiType_T_60 | _io_resp_bits_cfiType_T_34; // @[Mux.scala:30:73]
wire [1:0] _io_resp_bits_cfiType_T_62 = _io_resp_bits_cfiType_T_61 | _io_resp_bits_cfiType_T_35; // @[Mux.scala:30:73]
wire [1:0] _io_resp_bits_cfiType_T_63 = _io_resp_bits_cfiType_T_62 | _io_resp_bits_cfiType_T_36; // @[Mux.scala:30:73]
wire [1:0] _io_resp_bits_cfiType_T_64 = _io_resp_bits_cfiType_T_63 | _io_resp_bits_cfiType_T_37; // @[Mux.scala:30:73]
wire [1:0] _io_resp_bits_cfiType_T_65 = _io_resp_bits_cfiType_T_64 | _io_resp_bits_cfiType_T_38; // @[Mux.scala:30:73]
wire [1:0] _io_resp_bits_cfiType_T_66 = _io_resp_bits_cfiType_T_65 | _io_resp_bits_cfiType_T_39; // @[Mux.scala:30:73]
wire [1:0] _io_resp_bits_cfiType_T_67 = _io_resp_bits_cfiType_T_66 | _io_resp_bits_cfiType_T_40; // @[Mux.scala:30:73]
wire [1:0] _io_resp_bits_cfiType_T_68 = _io_resp_bits_cfiType_T_67 | _io_resp_bits_cfiType_T_41; // @[Mux.scala:30:73]
wire [1:0] _io_resp_bits_cfiType_T_69 = _io_resp_bits_cfiType_T_68 | _io_resp_bits_cfiType_T_42; // @[Mux.scala:30:73]
wire [1:0] _io_resp_bits_cfiType_T_70 = _io_resp_bits_cfiType_T_69 | _io_resp_bits_cfiType_T_43; // @[Mux.scala:30:73]
wire [1:0] _io_resp_bits_cfiType_T_71 = _io_resp_bits_cfiType_T_70 | _io_resp_bits_cfiType_T_44; // @[Mux.scala:30:73]
wire [1:0] _io_resp_bits_cfiType_T_72 = _io_resp_bits_cfiType_T_71 | _io_resp_bits_cfiType_T_45; // @[Mux.scala:30:73]
wire [1:0] _io_resp_bits_cfiType_T_73 = _io_resp_bits_cfiType_T_72 | _io_resp_bits_cfiType_T_46; // @[Mux.scala:30:73]
wire [1:0] _io_resp_bits_cfiType_T_74 = _io_resp_bits_cfiType_T_73 | _io_resp_bits_cfiType_T_47; // @[Mux.scala:30:73]
wire [1:0] _io_resp_bits_cfiType_T_75 = _io_resp_bits_cfiType_T_74 | _io_resp_bits_cfiType_T_48; // @[Mux.scala:30:73]
wire [1:0] _io_resp_bits_cfiType_T_76 = _io_resp_bits_cfiType_T_75 | _io_resp_bits_cfiType_T_49; // @[Mux.scala:30:73]
wire [1:0] _io_resp_bits_cfiType_T_77 = _io_resp_bits_cfiType_T_76 | _io_resp_bits_cfiType_T_50; // @[Mux.scala:30:73]
wire [1:0] _io_resp_bits_cfiType_T_78 = _io_resp_bits_cfiType_T_77 | _io_resp_bits_cfiType_T_51; // @[Mux.scala:30:73]
wire [1:0] _io_resp_bits_cfiType_T_79 = _io_resp_bits_cfiType_T_78 | _io_resp_bits_cfiType_T_52; // @[Mux.scala:30:73]
wire [1:0] _io_resp_bits_cfiType_T_80 = _io_resp_bits_cfiType_T_79 | _io_resp_bits_cfiType_T_53; // @[Mux.scala:30:73]
wire [1:0] _io_resp_bits_cfiType_T_81 = _io_resp_bits_cfiType_T_80 | _io_resp_bits_cfiType_T_54; // @[Mux.scala:30:73]
wire [1:0] _io_resp_bits_cfiType_T_82 = _io_resp_bits_cfiType_T_81 | _io_resp_bits_cfiType_T_55; // @[Mux.scala:30:73]
assign _io_resp_bits_cfiType_WIRE = _io_resp_bits_cfiType_T_82; // @[Mux.scala:30:73]
assign io_resp_bits_cfiType_0 = _io_resp_bits_cfiType_WIRE; // @[Mux.scala:30:73]
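// Population-count style tree over idxHit (Misc.scala): leftTwo_5 / rightTwo_14 flag two or more simultaneous tag hits so the aliased entries can be invalidated below.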
wire leftOne = idxHit[0]; // @[Misc.scala:178:18, :181:37]
wire leftOne_1 = idxHit[1]; // @[Misc.scala:178:18, :181:37, :182:39]
wire rightOne = idxHit[2]; // @[Misc.scala:178:18, :181:37, :182:39]
wire rightOne_1 = leftOne_1 | rightOne; // @[Misc.scala:178:18, :183:16]
wire rightTwo = leftOne_1 & rightOne; // @[Misc.scala:178:18, :183:{49,61}]
wire leftOne_2 = leftOne | rightOne_1; // @[Misc.scala:178:18, :183:16]
wire leftTwo = rightTwo | leftOne & rightOne_1; // @[Misc.scala:178:18, :183:{16,49,61}]
wire leftOne_3 = idxHit[3]; // @[Misc.scala:178:18, :181:37, :182:39]
wire rightOne_2 = idxHit[4]; // @[Misc.scala:178:18, :181:37, :182:39]
wire leftOne_4 = leftOne_3 | rightOne_2; // @[Misc.scala:178:18, :183:16]
wire leftTwo_1 = leftOne_3 & rightOne_2; // @[Misc.scala:178:18, :183:{49,61}]
wire leftOne_5 = idxHit[5]; // @[Misc.scala:178:18, :181:37, :182:39]
wire rightOne_3 = idxHit[6]; // @[Misc.scala:178:18, :181:37, :182:39]
wire rightOne_4 = leftOne_5 | rightOne_3; // @[Misc.scala:178:18, :183:16]
wire rightTwo_1 = leftOne_5 & rightOne_3; // @[Misc.scala:178:18, :183:{49,61}]
wire rightOne_5 = leftOne_4 | rightOne_4; // @[Misc.scala:183:16]
wire rightTwo_2 = leftTwo_1 | rightTwo_1 | leftOne_4 & rightOne_4; // @[Misc.scala:183:{16,37,49,61}]
wire leftOne_6 = leftOne_2 | rightOne_5; // @[Misc.scala:183:16]
wire leftTwo_2 = leftTwo | rightTwo_2 | leftOne_2 & rightOne_5; // @[Misc.scala:183:{16,37,49,61}]
wire leftOne_7 = idxHit[7]; // @[Misc.scala:178:18, :181:37, :182:39]
wire leftOne_8 = idxHit[8]; // @[Misc.scala:178:18, :181:37, :182:39]
wire rightOne_6 = idxHit[9]; // @[Misc.scala:178:18, :181:37, :182:39]
wire rightOne_7 = leftOne_8 | rightOne_6; // @[Misc.scala:178:18, :183:16]
wire rightTwo_3 = leftOne_8 & rightOne_6; // @[Misc.scala:178:18, :183:{49,61}]
wire leftOne_9 = leftOne_7 | rightOne_7; // @[Misc.scala:178:18, :183:16]
wire leftTwo_3 = rightTwo_3 | leftOne_7 & rightOne_7; // @[Misc.scala:178:18, :183:{16,49,61}]
wire leftOne_10 = idxHit[10]; // @[Misc.scala:178:18, :181:37, :182:39]
wire rightOne_8 = idxHit[11]; // @[Misc.scala:178:18, :181:37, :182:39]
wire leftOne_11 = leftOne_10 | rightOne_8; // @[Misc.scala:178:18, :183:16]
wire leftTwo_4 = leftOne_10 & rightOne_8; // @[Misc.scala:178:18, :183:{49,61}]
wire leftOne_12 = idxHit[12]; // @[Misc.scala:178:18, :181:37, :182:39]
wire rightOne_9 = idxHit[13]; // @[Misc.scala:178:18, :181:37, :182:39]
wire rightOne_10 = leftOne_12 | rightOne_9; // @[Misc.scala:178:18, :183:16]
wire rightTwo_4 = leftOne_12 & rightOne_9; // @[Misc.scala:178:18, :183:{49,61}]
wire rightOne_11 = leftOne_11 | rightOne_10; // @[Misc.scala:183:16]
wire rightTwo_5 = leftTwo_4 | rightTwo_4 | leftOne_11 & rightOne_10; // @[Misc.scala:183:{16,37,49,61}]
wire rightOne_12 = leftOne_9 | rightOne_11; // @[Misc.scala:183:16]
wire rightTwo_6 = leftTwo_3 | rightTwo_5 | leftOne_9 & rightOne_11; // @[Misc.scala:183:{16,37,49,61}]
wire leftOne_13 = leftOne_6 | rightOne_12; // @[Misc.scala:183:16]
wire leftTwo_5 = leftTwo_2 | rightTwo_6 | leftOne_6 & rightOne_12; // @[Misc.scala:183:{16,37,49,61}]
wire leftOne_14 = idxHit[14]; // @[Misc.scala:178:18, :181:37, :182:39]
wire leftOne_15 = idxHit[15]; // @[Misc.scala:178:18, :181:37, :182:39]
wire rightOne_13 = idxHit[16]; // @[Misc.scala:178:18, :181:37, :182:39]
wire rightOne_14 = leftOne_15 | rightOne_13; // @[Misc.scala:178:18, :183:16]
wire rightTwo_7 = leftOne_15 & rightOne_13; // @[Misc.scala:178:18, :183:{49,61}]
wire leftOne_16 = leftOne_14 | rightOne_14; // @[Misc.scala:178:18, :183:16]
wire leftTwo_6 = rightTwo_7 | leftOne_14 & rightOne_14; // @[Misc.scala:178:18, :183:{16,49,61}]
wire leftOne_17 = idxHit[17]; // @[Misc.scala:178:18, :181:37, :182:39]
wire rightOne_15 = idxHit[18]; // @[Misc.scala:178:18, :181:37, :182:39]
wire leftOne_18 = leftOne_17 | rightOne_15; // @[Misc.scala:178:18, :183:16]
wire leftTwo_7 = leftOne_17 & rightOne_15; // @[Misc.scala:178:18, :183:{49,61}]
wire leftOne_19 = idxHit[19]; // @[Misc.scala:178:18, :181:37, :182:39]
wire rightOne_16 = idxHit[20]; // @[Misc.scala:178:18, :181:37, :182:39]
wire rightOne_17 = leftOne_19 | rightOne_16; // @[Misc.scala:178:18, :183:16]
wire rightTwo_8 = leftOne_19 & rightOne_16; // @[Misc.scala:178:18, :183:{49,61}]
wire rightOne_18 = leftOne_18 | rightOne_17; // @[Misc.scala:183:16]
wire rightTwo_9 = leftTwo_7 | rightTwo_8 | leftOne_18 & rightOne_17; // @[Misc.scala:183:{16,37,49,61}]
wire leftOne_20 = leftOne_16 | rightOne_18; // @[Misc.scala:183:16]
wire leftTwo_8 = leftTwo_6 | rightTwo_9 | leftOne_16 & rightOne_18; // @[Misc.scala:183:{16,37,49,61}]
wire leftOne_21 = idxHit[21]; // @[Misc.scala:178:18, :181:37, :182:39]
wire leftOne_22 = idxHit[22]; // @[Misc.scala:178:18, :181:37, :182:39]
wire rightOne_19 = idxHit[23]; // @[Misc.scala:178:18, :181:37, :182:39]
wire rightOne_20 = leftOne_22 | rightOne_19; // @[Misc.scala:178:18, :183:16]
wire rightTwo_10 = leftOne_22 & rightOne_19; // @[Misc.scala:178:18, :183:{49,61}]
wire leftOne_23 = leftOne_21 | rightOne_20; // @[Misc.scala:178:18, :183:16]
wire leftTwo_9 = rightTwo_10 | leftOne_21 & rightOne_20; // @[Misc.scala:178:18, :183:{16,49,61}]
wire leftOne_24 = idxHit[24]; // @[Misc.scala:178:18, :181:37, :182:39]
wire rightOne_21 = idxHit[25]; // @[Misc.scala:178:18, :181:37, :182:39]
wire leftOne_25 = leftOne_24 | rightOne_21; // @[Misc.scala:178:18, :183:16]
wire leftTwo_10 = leftOne_24 & rightOne_21; // @[Misc.scala:178:18, :183:{49,61}]
wire leftOne_26 = idxHit[26]; // @[Misc.scala:178:18, :181:37, :182:39]
wire rightOne_22 = idxHit[27]; // @[Misc.scala:178:18, :182:39]
wire rightOne_23 = leftOne_26 | rightOne_22; // @[Misc.scala:178:18, :183:16]
wire rightTwo_11 = leftOne_26 & rightOne_22; // @[Misc.scala:178:18, :183:{49,61}]
wire rightOne_24 = leftOne_25 | rightOne_23; // @[Misc.scala:183:16]
wire rightTwo_12 = leftTwo_10 | rightTwo_11 | leftOne_25 & rightOne_23; // @[Misc.scala:183:{16,37,49,61}]
wire rightOne_25 = leftOne_23 | rightOne_24; // @[Misc.scala:183:16]
wire rightTwo_13 = leftTwo_9 | rightTwo_12 | leftOne_23 & rightOne_24; // @[Misc.scala:183:{16,37,49,61}]
wire rightOne_26 = leftOne_20 | rightOne_25; // @[Misc.scala:183:16]
wire rightTwo_14 = leftTwo_8 | rightTwo_13 | leftOne_20 & rightOne_25; // @[Misc.scala:183:{16,37,49,61}]
wire [27:0] _isValid_T_4 = ~idxHit; // @[BTB.scala:218:32, :297:26]
wire [27:0] _isValid_T_5 = isValid & _isValid_T_4; // @[BTB.scala:207:24, :297:{24,26}]
reg [7:0] history; // @[BTB.scala:117:24]
assign res_history = history; // @[BTB.scala:91:19, :117:24]
reg [9:0] reset_waddr; // @[BTB.scala:119:36]
wire _resetting_T = reset_waddr[9]; // @[BTB.scala:119:36, :120:39]
wire resetting = ~_resetting_T; // @[BTB.scala:120:{27,39}]
wire wen; // @[BTB.scala:121:29]
wire [9:0] waddr_1; // @[BTB.scala:122:31]
wire wdata; // @[BTB.scala:123:31]
wire [10:0] _reset_waddr_T = {1'h0, reset_waddr} + 11'h1; // @[BTB.scala:119:36, :124:49]
wire [9:0] _reset_waddr_T_1 = _reset_waddr_T[9:0]; // @[BTB.scala:124:49]
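// isBranch: does the hitting BTB entry record a conditional branch (cfiType == 0)? Only then does the BHT value gate io_resp_bits_taken.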
wire _isBranch_T = cfiType_0 == 2'h0; // @[BTB.scala:208:20, :305:44]
wire _isBranch_T_1 = cfiType_1 == 2'h0; // @[BTB.scala:208:20, :305:44]
wire _isBranch_T_2 = cfiType_2 == 2'h0; // @[BTB.scala:208:20, :305:44]
wire _isBranch_T_3 = cfiType_3 == 2'h0; // @[BTB.scala:208:20, :305:44]
wire _isBranch_T_4 = cfiType_4 == 2'h0; // @[BTB.scala:208:20, :305:44]
wire _isBranch_T_5 = cfiType_5 == 2'h0; // @[BTB.scala:208:20, :305:44]
wire _isBranch_T_6 = cfiType_6 == 2'h0; // @[BTB.scala:208:20, :305:44]
wire _isBranch_T_7 = cfiType_7 == 2'h0; // @[BTB.scala:208:20, :305:44]
wire _isBranch_T_8 = cfiType_8 == 2'h0; // @[BTB.scala:208:20, :305:44]
wire _isBranch_T_9 = cfiType_9 == 2'h0; // @[BTB.scala:208:20, :305:44]
wire _isBranch_T_10 = cfiType_10 == 2'h0; // @[BTB.scala:208:20, :305:44]
wire _isBranch_T_11 = cfiType_11 == 2'h0; // @[BTB.scala:208:20, :305:44]
wire _isBranch_T_12 = cfiType_12 == 2'h0; // @[BTB.scala:208:20, :305:44]
wire _isBranch_T_13 = cfiType_13 == 2'h0; // @[BTB.scala:208:20, :305:44]
wire _isBranch_T_14 = cfiType_14 == 2'h0; // @[BTB.scala:208:20, :305:44]
wire _isBranch_T_15 = cfiType_15 == 2'h0; // @[BTB.scala:208:20, :305:44]
wire _isBranch_T_16 = cfiType_16 == 2'h0; // @[BTB.scala:208:20, :305:44]
wire _isBranch_T_17 = cfiType_17 == 2'h0; // @[BTB.scala:208:20, :305:44]
wire _isBranch_T_18 = cfiType_18 == 2'h0; // @[BTB.scala:208:20, :305:44]
wire _isBranch_T_19 = cfiType_19 == 2'h0; // @[BTB.scala:208:20, :305:44]
wire _isBranch_T_20 = cfiType_20 == 2'h0; // @[BTB.scala:208:20, :305:44]
wire _isBranch_T_21 = cfiType_21 == 2'h0; // @[BTB.scala:208:20, :305:44]
wire _isBranch_T_22 = cfiType_22 == 2'h0; // @[BTB.scala:208:20, :305:44]
wire _isBranch_T_23 = cfiType_23 == 2'h0; // @[BTB.scala:208:20, :305:44]
wire _isBranch_T_24 = cfiType_24 == 2'h0; // @[BTB.scala:208:20, :305:44]
wire _isBranch_T_25 = cfiType_25 == 2'h0; // @[BTB.scala:208:20, :305:44]
wire _isBranch_T_26 = cfiType_26 == 2'h0; // @[BTB.scala:208:20, :305:44]
wire _isBranch_T_27 = cfiType_27 == 2'h0; // @[BTB.scala:208:20, :305:44]
wire [1:0] isBranch_lo_lo_lo_hi = {_isBranch_T_2, _isBranch_T_1}; // @[package.scala:45:27]
wire [2:0] isBranch_lo_lo_lo = {isBranch_lo_lo_lo_hi, _isBranch_T}; // @[package.scala:45:27]
wire [1:0] isBranch_lo_lo_hi_lo = {_isBranch_T_4, _isBranch_T_3}; // @[package.scala:45:27]
wire [1:0] isBranch_lo_lo_hi_hi = {_isBranch_T_6, _isBranch_T_5}; // @[package.scala:45:27]
wire [3:0] isBranch_lo_lo_hi = {isBranch_lo_lo_hi_hi, isBranch_lo_lo_hi_lo}; // @[package.scala:45:27]
wire [6:0] isBranch_lo_lo = {isBranch_lo_lo_hi, isBranch_lo_lo_lo}; // @[package.scala:45:27]
wire [1:0] isBranch_lo_hi_lo_hi = {_isBranch_T_9, _isBranch_T_8}; // @[package.scala:45:27]
wire [2:0] isBranch_lo_hi_lo = {isBranch_lo_hi_lo_hi, _isBranch_T_7}; // @[package.scala:45:27]
wire [1:0] isBranch_lo_hi_hi_lo = {_isBranch_T_11, _isBranch_T_10}; // @[package.scala:45:27]
wire [1:0] isBranch_lo_hi_hi_hi = {_isBranch_T_13, _isBranch_T_12}; // @[package.scala:45:27]
wire [3:0] isBranch_lo_hi_hi = {isBranch_lo_hi_hi_hi, isBranch_lo_hi_hi_lo}; // @[package.scala:45:27]
wire [6:0] isBranch_lo_hi = {isBranch_lo_hi_hi, isBranch_lo_hi_lo}; // @[package.scala:45:27]
wire [13:0] isBranch_lo = {isBranch_lo_hi, isBranch_lo_lo}; // @[package.scala:45:27]
wire [1:0] isBranch_hi_lo_lo_hi = {_isBranch_T_16, _isBranch_T_15}; // @[package.scala:45:27]
wire [2:0] isBranch_hi_lo_lo = {isBranch_hi_lo_lo_hi, _isBranch_T_14}; // @[package.scala:45:27]
wire [1:0] isBranch_hi_lo_hi_lo = {_isBranch_T_18, _isBranch_T_17}; // @[package.scala:45:27]
wire [1:0] isBranch_hi_lo_hi_hi = {_isBranch_T_20, _isBranch_T_19}; // @[package.scala:45:27]
wire [3:0] isBranch_hi_lo_hi = {isBranch_hi_lo_hi_hi, isBranch_hi_lo_hi_lo}; // @[package.scala:45:27]
wire [6:0] isBranch_hi_lo = {isBranch_hi_lo_hi, isBranch_hi_lo_lo}; // @[package.scala:45:27]
wire [1:0] isBranch_hi_hi_lo_hi = {_isBranch_T_23, _isBranch_T_22}; // @[package.scala:45:27]
wire [2:0] isBranch_hi_hi_lo = {isBranch_hi_hi_lo_hi, _isBranch_T_21}; // @[package.scala:45:27]
wire [1:0] isBranch_hi_hi_hi_lo = {_isBranch_T_25, _isBranch_T_24}; // @[package.scala:45:27]
wire [1:0] isBranch_hi_hi_hi_hi = {_isBranch_T_27, _isBranch_T_26}; // @[package.scala:45:27]
wire [3:0] isBranch_hi_hi_hi = {isBranch_hi_hi_hi_hi, isBranch_hi_hi_hi_lo}; // @[package.scala:45:27]
wire [6:0] isBranch_hi_hi = {isBranch_hi_hi_hi, isBranch_hi_hi_lo}; // @[package.scala:45:27]
wire [13:0] isBranch_hi = {isBranch_hi_hi, isBranch_hi_lo}; // @[package.scala:45:27]
wire [27:0] _isBranch_T_28 = {isBranch_hi, isBranch_lo}; // @[package.scala:45:27]
wire [27:0] _isBranch_T_29 = idxHit & _isBranch_T_28; // @[package.scala:45:27]
wire isBranch = |_isBranch_T_29; // @[BTB.scala:305:{28,72}]
assign io_resp_bits_bht_history_0 = res_history; // @[BTB.scala:91:19, :187:7]
wire _res_res_value_T_8; // @[BTB.scala:92:21]
assign io_resp_bits_bht_value_0 = res_value; // @[BTB.scala:91:19, :187:7]
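// BHT index hash: fold the low fetch-address bits with the history register (scaled by 8'hDD) to form the counter-table read address.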
wire [36:0] res_res_value_hi = io_req_bits_addr_0[38:2]; // @[BTB.scala:85:21, :187:7]
wire [8:0] _res_res_value_T = res_res_value_hi[8:0]; // @[BTB.scala:85:21, :86:9]
wire [27:0] _res_res_value_T_1 = res_res_value_hi[36:9]; // @[BTB.scala:85:21, :86:48]
wire [1:0] _res_res_value_T_2 = _res_res_value_T_1[1:0]; // @[BTB.scala:86:{48,77}]
wire [8:0] _res_res_value_T_3 = {_res_res_value_T[8:2], _res_res_value_T[1:0] ^ _res_res_value_T_2}; // @[BTB.scala:86:{9,42,77}]
wire [15:0] _res_res_value_T_4 = {8'h0, history} * 16'hDD; // @[BTB.scala:82:12, :117:24]
wire [2:0] _res_res_value_T_5 = _res_res_value_T_4[7:5]; // @[BTB.scala:82:{12,19}]
wire [8:0] _res_res_value_T_6 = {_res_res_value_T_5, 6'h0}; // @[BTB.scala:82:19, :88:44]
wire [8:0] _res_res_value_T_7 = _res_res_value_T_3 ^ _res_res_value_T_6; // @[BTB.scala:86:42, :88:{20,44}]
assign _res_res_value_T_8 = ~resetting & _table_ext_R0_data; // @[BTB.scala:92:21, :116:26, :120:27]
assign res_value = _res_res_value_T_8; // @[BTB.scala:91:19, :92:21]
wire [6:0] _history_T = history[7:1]; // @[BTB.scala:113:35, :117:24]
wire [7:0] _history_T_1 = {io_bht_advance_bits_bht_value_0, _history_T}; // @[BTB.scala:113:{19,35}, :187:7]
wire _GEN = io_bht_update_valid_0 & io_bht_update_bits_branch_0; // @[BTB.scala:97:9, :121:29, :187:7, :310:32, :311:40]
assign wen = _GEN | resetting; // @[BTB.scala:97:9, :120:27, :121:29, :310:32, :311:40]
wire [36:0] waddr_hi = io_bht_update_bits_pc_0[38:2]; // @[BTB.scala:85:21, :187:7]
wire [8:0] _waddr_T_40 = waddr_hi[8:0]; // @[BTB.scala:85:21, :86:9]
wire [27:0] _waddr_T_41 = waddr_hi[36:9]; // @[BTB.scala:85:21, :86:48]
wire [1:0] _waddr_T_42 = _waddr_T_41[1:0]; // @[BTB.scala:86:{48,77}]
wire [8:0] _waddr_T_43 = {_waddr_T_40[8:2], _waddr_T_40[1:0] ^ _waddr_T_42}; // @[BTB.scala:86:{9,42,77}]
wire [15:0] _waddr_T_44 = {8'h0, io_bht_update_bits_prediction_history_0} * 16'hDD; // @[BTB.scala:82:12, :187:7]
wire [2:0] _waddr_T_45 = _waddr_T_44[7:5]; // @[BTB.scala:82:{12,19}]
wire [8:0] _waddr_T_46 = {_waddr_T_45, 6'h0}; // @[BTB.scala:82:19, :88:44]
wire [8:0] _waddr_T_47 = _waddr_T_43 ^ _waddr_T_46; // @[BTB.scala:86:42, :88:{20,44}]
assign waddr_1 = io_bht_update_valid_0 & io_bht_update_bits_branch_0 & ~resetting ? {1'h0, _waddr_T_47} : reset_waddr; // @[BTB.scala:88:20, :98:{11,23}, :99:13, :119:36, :120:27, :122:31, :187:7, :310:32, :311:40]
assign wdata = _GEN & ~resetting & io_bht_update_bits_taken_0; // @[BTB.scala:97:9, :98:{11,23}, :100:13, :120:27, :121:29, :123:31, :187:7, :310:32, :311:40]
wire [6:0] _history_T_2 = io_bht_update_bits_prediction_history_0[7:1]; // @[BTB.scala:110:37, :187:7]
wire [7:0] _history_T_3 = {io_bht_update_bits_taken_0, _history_T_2}; // @[BTB.scala:110:{19,37}, :187:7]
assign io_resp_bits_taken_0 = ~(~res_value & isBranch); // @[BTB.scala:91:19, :187:7, :288:22, :305:72, :320:{11,22,35,56}]
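// Return address stack (RAS): a six-entry circular stack tracked by the count and pos registers.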
reg [2:0] count; // @[BTB.scala:56:30]
reg [2:0] pos; // @[BTB.scala:57:28]
reg [38:0] stack_0; // @[BTB.scala:58:26]
reg [38:0] stack_1; // @[BTB.scala:58:26]
reg [38:0] stack_2; // @[BTB.scala:58:26]
reg [38:0] stack_3; // @[BTB.scala:58:26]
reg [38:0] stack_4; // @[BTB.scala:58:26]
reg [38:0] stack_5; // @[BTB.scala:58:26]
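// doPeek: the hitting entry is a return (cfiType all ones), so the RAS top is used as the predicted target when the stack is non-empty.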
wire _doPeek_T = &cfiType_0; // @[BTB.scala:208:20, :326:42]
wire _doPeek_T_1 = &cfiType_1; // @[BTB.scala:208:20, :326:42]
wire _doPeek_T_2 = &cfiType_2; // @[BTB.scala:208:20, :326:42]
wire _doPeek_T_3 = &cfiType_3; // @[BTB.scala:208:20, :326:42]
wire _doPeek_T_4 = &cfiType_4; // @[BTB.scala:208:20, :326:42]
wire _doPeek_T_5 = &cfiType_5; // @[BTB.scala:208:20, :326:42]
wire _doPeek_T_6 = &cfiType_6; // @[BTB.scala:208:20, :326:42]
wire _doPeek_T_7 = &cfiType_7; // @[BTB.scala:208:20, :326:42]
wire _doPeek_T_8 = &cfiType_8; // @[BTB.scala:208:20, :326:42]
wire _doPeek_T_9 = &cfiType_9; // @[BTB.scala:208:20, :326:42]
wire _doPeek_T_10 = &cfiType_10; // @[BTB.scala:208:20, :326:42]
wire _doPeek_T_11 = &cfiType_11; // @[BTB.scala:208:20, :326:42]
wire _doPeek_T_12 = &cfiType_12; // @[BTB.scala:208:20, :326:42]
wire _doPeek_T_13 = &cfiType_13; // @[BTB.scala:208:20, :326:42]
wire _doPeek_T_14 = &cfiType_14; // @[BTB.scala:208:20, :326:42]
wire _doPeek_T_15 = &cfiType_15; // @[BTB.scala:208:20, :326:42]
wire _doPeek_T_16 = &cfiType_16; // @[BTB.scala:208:20, :326:42]
wire _doPeek_T_17 = &cfiType_17; // @[BTB.scala:208:20, :326:42]
wire _doPeek_T_18 = &cfiType_18; // @[BTB.scala:208:20, :326:42]
wire _doPeek_T_19 = &cfiType_19; // @[BTB.scala:208:20, :326:42]
wire _doPeek_T_20 = &cfiType_20; // @[BTB.scala:208:20, :326:42]
wire _doPeek_T_21 = &cfiType_21; // @[BTB.scala:208:20, :326:42]
wire _doPeek_T_22 = &cfiType_22; // @[BTB.scala:208:20, :326:42]
wire _doPeek_T_23 = &cfiType_23; // @[BTB.scala:208:20, :326:42]
wire _doPeek_T_24 = &cfiType_24; // @[BTB.scala:208:20, :326:42]
wire _doPeek_T_25 = &cfiType_25; // @[BTB.scala:208:20, :326:42]
wire _doPeek_T_26 = &cfiType_26; // @[BTB.scala:208:20, :326:42]
wire _doPeek_T_27 = &cfiType_27; // @[BTB.scala:208:20, :326:42]
wire [1:0] doPeek_lo_lo_lo_hi = {_doPeek_T_2, _doPeek_T_1}; // @[package.scala:45:27]
wire [2:0] doPeek_lo_lo_lo = {doPeek_lo_lo_lo_hi, _doPeek_T}; // @[package.scala:45:27]
wire [1:0] doPeek_lo_lo_hi_lo = {_doPeek_T_4, _doPeek_T_3}; // @[package.scala:45:27]
wire [1:0] doPeek_lo_lo_hi_hi = {_doPeek_T_6, _doPeek_T_5}; // @[package.scala:45:27]
wire [3:0] doPeek_lo_lo_hi = {doPeek_lo_lo_hi_hi, doPeek_lo_lo_hi_lo}; // @[package.scala:45:27]
wire [6:0] doPeek_lo_lo = {doPeek_lo_lo_hi, doPeek_lo_lo_lo}; // @[package.scala:45:27]
wire [1:0] doPeek_lo_hi_lo_hi = {_doPeek_T_9, _doPeek_T_8}; // @[package.scala:45:27]
wire [2:0] doPeek_lo_hi_lo = {doPeek_lo_hi_lo_hi, _doPeek_T_7}; // @[package.scala:45:27]
wire [1:0] doPeek_lo_hi_hi_lo = {_doPeek_T_11, _doPeek_T_10}; // @[package.scala:45:27]
wire [1:0] doPeek_lo_hi_hi_hi = {_doPeek_T_13, _doPeek_T_12}; // @[package.scala:45:27]
wire [3:0] doPeek_lo_hi_hi = {doPeek_lo_hi_hi_hi, doPeek_lo_hi_hi_lo}; // @[package.scala:45:27]
wire [6:0] doPeek_lo_hi = {doPeek_lo_hi_hi, doPeek_lo_hi_lo}; // @[package.scala:45:27]
wire [13:0] doPeek_lo = {doPeek_lo_hi, doPeek_lo_lo}; // @[package.scala:45:27]
wire [1:0] doPeek_hi_lo_lo_hi = {_doPeek_T_16, _doPeek_T_15}; // @[package.scala:45:27]
wire [2:0] doPeek_hi_lo_lo = {doPeek_hi_lo_lo_hi, _doPeek_T_14}; // @[package.scala:45:27]
wire [1:0] doPeek_hi_lo_hi_lo = {_doPeek_T_18, _doPeek_T_17}; // @[package.scala:45:27]
wire [1:0] doPeek_hi_lo_hi_hi = {_doPeek_T_20, _doPeek_T_19}; // @[package.scala:45:27]
wire [3:0] doPeek_hi_lo_hi = {doPeek_hi_lo_hi_hi, doPeek_hi_lo_hi_lo}; // @[package.scala:45:27]
wire [6:0] doPeek_hi_lo = {doPeek_hi_lo_hi, doPeek_hi_lo_lo}; // @[package.scala:45:27]
wire [1:0] doPeek_hi_hi_lo_hi = {_doPeek_T_23, _doPeek_T_22}; // @[package.scala:45:27]
wire [2:0] doPeek_hi_hi_lo = {doPeek_hi_hi_lo_hi, _doPeek_T_21}; // @[package.scala:45:27]
wire [1:0] doPeek_hi_hi_hi_lo = {_doPeek_T_25, _doPeek_T_24}; // @[package.scala:45:27]
wire [1:0] doPeek_hi_hi_hi_hi = {_doPeek_T_27, _doPeek_T_26}; // @[package.scala:45:27]
wire [3:0] doPeek_hi_hi_hi = {doPeek_hi_hi_hi_hi, doPeek_hi_hi_hi_lo}; // @[package.scala:45:27]
wire [6:0] doPeek_hi_hi = {doPeek_hi_hi_hi, doPeek_hi_hi_lo}; // @[package.scala:45:27]
wire [13:0] doPeek_hi = {doPeek_hi_hi, doPeek_hi_lo}; // @[package.scala:45:27]
wire [27:0] _doPeek_T_28 = {doPeek_hi, doPeek_lo}; // @[package.scala:45:27]
wire [27:0] _doPeek_T_29 = idxHit & _doPeek_T_28; // @[package.scala:45:27]
wire doPeek = |_doPeek_T_29; // @[BTB.scala:326:{26,67}]
wire _io_ras_head_valid_T = ~(|count); // @[BTB.scala:54:29, :56:30]
assign _io_ras_head_valid_T_1 = ~_io_ras_head_valid_T; // @[BTB.scala:54:29, :327:26]
assign io_ras_head_valid_0 = _io_ras_head_valid_T_1; // @[BTB.scala:187:7, :327:26]
wire [7:0][38:0] _GEN_0 = {{stack_0}, {stack_0}, {stack_5}, {stack_4}, {stack_3}, {stack_2}, {stack_1}, {stack_0}}; // @[BTB.scala:58:26, :328:22]
assign io_ras_head_bits_0 = _GEN_0[pos]; // @[BTB.scala:57:28, :187:7, :328:22]
assign io_resp_bits_target_0 = (|count) & doPeek ? io_ras_head_bits_0 : _io_resp_bits_target_T_181; // @[BTB.scala:54:29, :56:30, :187:7, :289:{23,29}, :326:67, :329:{24,35}, :330:27]
wire [3:0] _GEN_1 = {1'h0, count}; // @[BTB.scala:43:44, :56:30]
wire [3:0] _count_T = _GEN_1 + 4'h1; // @[BTB.scala:43:44]
wire [2:0] _count_T_1 = _count_T[2:0]; // @[BTB.scala:43:44]
wire _nextPos_T = pos < 3'h5; // @[BTB.scala:44:47, :57:28]
wire _nextPos_T_1 = _nextPos_T; // @[BTB.scala:44:{40,47}]
wire [3:0] _GEN_2 = {1'h0, pos}; // @[BTB.scala:44:64, :57:28]
wire [3:0] _nextPos_T_2 = _GEN_2 + 4'h1; // @[BTB.scala:44:64]
wire [2:0] _nextPos_T_3 = _nextPos_T_2[2:0]; // @[BTB.scala:44:64]
wire [2:0] nextPos = _nextPos_T_1 ? _nextPos_T_3 : 3'h0; // @[BTB.scala:44:{22,40,64}, :51:40]
wire [3:0] _count_T_2 = _GEN_1 - 4'h1; // @[BTB.scala:43:44, :50:20]
wire [2:0] _count_T_3 = _count_T_2[2:0]; // @[BTB.scala:50:20]
wire _pos_T = |pos; // @[BTB.scala:51:40, :57:28]
wire _pos_T_1 = _pos_T; // @[BTB.scala:51:{33,40}]
wire [3:0] _pos_T_2 = _GEN_2 - 4'h1; // @[BTB.scala:44:64, :51:50]
wire [2:0] _pos_T_3 = _pos_T_2[2:0]; // @[BTB.scala:51:50]
wire [2:0] _pos_T_4 = _pos_T_1 ? _pos_T_3 : 3'h5; // @[BTB.scala:51:{15,33,50}]
wire [4:0] _T_5 = idxWritesEven ? idxPageReplEn[4:0] : tgtPageReplEn[4:0]; // @[BTB.scala:241:26, :247:26, :274:25, :280:24]
wire [24:0] _T_8 = idxWritesEven ? r_btb_update_bits_pc[38:14] : io_req_bits_addr_0[38:14]; // @[Valid.scala:135:21]
wire [4:0] _T_12 = idxWritesEven ? tgtPageReplEn[5:1] : idxPageReplEn[5:1]; // @[BTB.scala:241:26, :247:26, :274:25, :282:24]
wire [24:0] _T_15 = idxWritesEven ? io_req_bits_addr_0[38:14] : r_btb_update_bits_pc[38:14]; // @[Valid.scala:135:21]
wire _T_139 = io_ras_update_bits_cfiType_0 == 2'h2; // @[BTB.scala:187:7, :333:40]
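// Sequential update logic: BTB entry and page writes, update/response pipeline registers, RAS push/pop, BHT history and reset sequencing.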
always @(posedge clock) begin // @[BTB.scala:187:7]
if (r_btb_update_valid & waddr == 5'h0) begin // @[Valid.scala:135:21]
idxs_0 <= _idxs_T; // @[BTB.scala:199:17, :264:40]
idxPages_0 <= _idxPages_T[2:0]; // @[BTB.scala:200:21, :266:{21,38}]
tgts_0 <= _tgts_T; // @[BTB.scala:201:17, :265:33]
tgtPages_0 <= tgtPageUpdate; // @[OneHot.scala:32:10]
cfiType_0 <= r_btb_update_bits_cfiType; // @[Valid.scala:135:21]
brIdx_0 <= _brIdx_T[0]; // @[BTB.scala:209:18, :271:{20,47}]
end
if (r_btb_update_valid & waddr == 5'h1) begin // @[Valid.scala:135:21]
idxs_1 <= _idxs_T; // @[BTB.scala:199:17, :264:40]
idxPages_1 <= _idxPages_T[2:0]; // @[BTB.scala:200:21, :266:{21,38}]
tgts_1 <= _tgts_T; // @[BTB.scala:201:17, :265:33]
tgtPages_1 <= tgtPageUpdate; // @[OneHot.scala:32:10]
cfiType_1 <= r_btb_update_bits_cfiType; // @[Valid.scala:135:21]
brIdx_1 <= _brIdx_T[0]; // @[BTB.scala:209:18, :271:{20,47}]
end
if (r_btb_update_valid & waddr == 5'h2) begin // @[Valid.scala:135:21]
idxs_2 <= _idxs_T; // @[BTB.scala:199:17, :264:40]
idxPages_2 <= _idxPages_T[2:0]; // @[BTB.scala:200:21, :266:{21,38}]
tgts_2 <= _tgts_T; // @[BTB.scala:201:17, :265:33]
tgtPages_2 <= tgtPageUpdate; // @[OneHot.scala:32:10]
cfiType_2 <= r_btb_update_bits_cfiType; // @[Valid.scala:135:21]
brIdx_2 <= _brIdx_T[0]; // @[BTB.scala:209:18, :271:{20,47}]
end
if (r_btb_update_valid & waddr == 5'h3) begin // @[Valid.scala:135:21]
idxs_3 <= _idxs_T; // @[BTB.scala:199:17, :264:40]
idxPages_3 <= _idxPages_T[2:0]; // @[BTB.scala:200:21, :266:{21,38}]
tgts_3 <= _tgts_T; // @[BTB.scala:201:17, :265:33]
tgtPages_3 <= tgtPageUpdate; // @[OneHot.scala:32:10]
cfiType_3 <= r_btb_update_bits_cfiType; // @[Valid.scala:135:21]
brIdx_3 <= _brIdx_T[0]; // @[BTB.scala:209:18, :271:{20,47}]
end
if (r_btb_update_valid & waddr == 5'h4) begin // @[Valid.scala:135:21]
idxs_4 <= _idxs_T; // @[BTB.scala:199:17, :264:40]
idxPages_4 <= _idxPages_T[2:0]; // @[BTB.scala:200:21, :266:{21,38}]
tgts_4 <= _tgts_T; // @[BTB.scala:201:17, :265:33]
tgtPages_4 <= tgtPageUpdate; // @[OneHot.scala:32:10]
cfiType_4 <= r_btb_update_bits_cfiType; // @[Valid.scala:135:21]
brIdx_4 <= _brIdx_T[0]; // @[BTB.scala:209:18, :271:{20,47}]
end
if (r_btb_update_valid & waddr == 5'h5) begin // @[Valid.scala:135:21]
idxs_5 <= _idxs_T; // @[BTB.scala:199:17, :264:40]
idxPages_5 <= _idxPages_T[2:0]; // @[BTB.scala:200:21, :266:{21,38}]
tgts_5 <= _tgts_T; // @[BTB.scala:201:17, :265:33]
tgtPages_5 <= tgtPageUpdate; // @[OneHot.scala:32:10]
cfiType_5 <= r_btb_update_bits_cfiType; // @[Valid.scala:135:21]
brIdx_5 <= _brIdx_T[0]; // @[BTB.scala:209:18, :271:{20,47}]
end
if (r_btb_update_valid & waddr == 5'h6) begin // @[Valid.scala:135:21]
idxs_6 <= _idxs_T; // @[BTB.scala:199:17, :264:40]
idxPages_6 <= _idxPages_T[2:0]; // @[BTB.scala:200:21, :266:{21,38}]
tgts_6 <= _tgts_T; // @[BTB.scala:201:17, :265:33]
tgtPages_6 <= tgtPageUpdate; // @[OneHot.scala:32:10]
cfiType_6 <= r_btb_update_bits_cfiType; // @[Valid.scala:135:21]
brIdx_6 <= _brIdx_T[0]; // @[BTB.scala:209:18, :271:{20,47}]
end
if (r_btb_update_valid & waddr == 5'h7) begin // @[Valid.scala:135:21]
idxs_7 <= _idxs_T; // @[BTB.scala:199:17, :264:40]
idxPages_7 <= _idxPages_T[2:0]; // @[BTB.scala:200:21, :266:{21,38}]
tgts_7 <= _tgts_T; // @[BTB.scala:201:17, :265:33]
tgtPages_7 <= tgtPageUpdate; // @[OneHot.scala:32:10]
cfiType_7 <= r_btb_update_bits_cfiType; // @[Valid.scala:135:21]
brIdx_7 <= _brIdx_T[0]; // @[BTB.scala:209:18, :271:{20,47}]
end
if (r_btb_update_valid & waddr == 5'h8) begin // @[Valid.scala:135:21]
idxs_8 <= _idxs_T; // @[BTB.scala:199:17, :264:40]
idxPages_8 <= _idxPages_T[2:0]; // @[BTB.scala:200:21, :266:{21,38}]
tgts_8 <= _tgts_T; // @[BTB.scala:201:17, :265:33]
tgtPages_8 <= tgtPageUpdate; // @[OneHot.scala:32:10]
cfiType_8 <= r_btb_update_bits_cfiType; // @[Valid.scala:135:21]
brIdx_8 <= _brIdx_T[0]; // @[BTB.scala:209:18, :271:{20,47}]
end
if (r_btb_update_valid & waddr == 5'h9) begin // @[Valid.scala:135:21]
idxs_9 <= _idxs_T; // @[BTB.scala:199:17, :264:40]
idxPages_9 <= _idxPages_T[2:0]; // @[BTB.scala:200:21, :266:{21,38}]
tgts_9 <= _tgts_T; // @[BTB.scala:201:17, :265:33]
tgtPages_9 <= tgtPageUpdate; // @[OneHot.scala:32:10]
cfiType_9 <= r_btb_update_bits_cfiType; // @[Valid.scala:135:21]
brIdx_9 <= _brIdx_T[0]; // @[BTB.scala:209:18, :271:{20,47}]
end
if (r_btb_update_valid & waddr == 5'hA) begin // @[Valid.scala:135:21]
idxs_10 <= _idxs_T; // @[BTB.scala:199:17, :264:40]
idxPages_10 <= _idxPages_T[2:0]; // @[BTB.scala:200:21, :266:{21,38}]
tgts_10 <= _tgts_T; // @[BTB.scala:201:17, :265:33]
tgtPages_10 <= tgtPageUpdate; // @[OneHot.scala:32:10]
cfiType_10 <= r_btb_update_bits_cfiType; // @[Valid.scala:135:21]
brIdx_10 <= _brIdx_T[0]; // @[BTB.scala:209:18, :271:{20,47}]
end
if (r_btb_update_valid & waddr == 5'hB) begin // @[Valid.scala:135:21]
idxs_11 <= _idxs_T; // @[BTB.scala:199:17, :264:40]
idxPages_11 <= _idxPages_T[2:0]; // @[BTB.scala:200:21, :266:{21,38}]
tgts_11 <= _tgts_T; // @[BTB.scala:201:17, :265:33]
tgtPages_11 <= tgtPageUpdate; // @[OneHot.scala:32:10]
cfiType_11 <= r_btb_update_bits_cfiType; // @[Valid.scala:135:21]
brIdx_11 <= _brIdx_T[0]; // @[BTB.scala:209:18, :271:{20,47}]
end
if (r_btb_update_valid & waddr == 5'hC) begin // @[Valid.scala:135:21]
idxs_12 <= _idxs_T; // @[BTB.scala:199:17, :264:40]
idxPages_12 <= _idxPages_T[2:0]; // @[BTB.scala:200:21, :266:{21,38}]
tgts_12 <= _tgts_T; // @[BTB.scala:201:17, :265:33]
tgtPages_12 <= tgtPageUpdate; // @[OneHot.scala:32:10]
cfiType_12 <= r_btb_update_bits_cfiType; // @[Valid.scala:135:21]
brIdx_12 <= _brIdx_T[0]; // @[BTB.scala:209:18, :271:{20,47}]
end
if (r_btb_update_valid & waddr == 5'hD) begin // @[Valid.scala:135:21]
idxs_13 <= _idxs_T; // @[BTB.scala:199:17, :264:40]
idxPages_13 <= _idxPages_T[2:0]; // @[BTB.scala:200:21, :266:{21,38}]
tgts_13 <= _tgts_T; // @[BTB.scala:201:17, :265:33]
tgtPages_13 <= tgtPageUpdate; // @[OneHot.scala:32:10]
cfiType_13 <= r_btb_update_bits_cfiType; // @[Valid.scala:135:21]
brIdx_13 <= _brIdx_T[0]; // @[BTB.scala:209:18, :271:{20,47}]
end
if (r_btb_update_valid & waddr == 5'hE) begin // @[Valid.scala:135:21]
idxs_14 <= _idxs_T; // @[BTB.scala:199:17, :264:40]
idxPages_14 <= _idxPages_T[2:0]; // @[BTB.scala:200:21, :266:{21,38}]
tgts_14 <= _tgts_T; // @[BTB.scala:201:17, :265:33]
tgtPages_14 <= tgtPageUpdate; // @[OneHot.scala:32:10]
cfiType_14 <= r_btb_update_bits_cfiType; // @[Valid.scala:135:21]
brIdx_14 <= _brIdx_T[0]; // @[BTB.scala:209:18, :271:{20,47}]
end
if (r_btb_update_valid & waddr == 5'hF) begin // @[Valid.scala:135:21]
idxs_15 <= _idxs_T; // @[BTB.scala:199:17, :264:40]
idxPages_15 <= _idxPages_T[2:0]; // @[BTB.scala:200:21, :266:{21,38}]
tgts_15 <= _tgts_T; // @[BTB.scala:201:17, :265:33]
tgtPages_15 <= tgtPageUpdate; // @[OneHot.scala:32:10]
cfiType_15 <= r_btb_update_bits_cfiType; // @[Valid.scala:135:21]
brIdx_15 <= _brIdx_T[0]; // @[BTB.scala:209:18, :271:{20,47}]
end
if (r_btb_update_valid & waddr == 5'h10) begin // @[Valid.scala:135:21]
idxs_16 <= _idxs_T; // @[BTB.scala:199:17, :264:40]
idxPages_16 <= _idxPages_T[2:0]; // @[BTB.scala:200:21, :266:{21,38}]
tgts_16 <= _tgts_T; // @[BTB.scala:201:17, :265:33]
tgtPages_16 <= tgtPageUpdate; // @[OneHot.scala:32:10]
cfiType_16 <= r_btb_update_bits_cfiType; // @[Valid.scala:135:21]
brIdx_16 <= _brIdx_T[0]; // @[BTB.scala:209:18, :271:{20,47}]
end
if (r_btb_update_valid & waddr == 5'h11) begin // @[Valid.scala:135:21]
idxs_17 <= _idxs_T; // @[BTB.scala:199:17, :264:40]
idxPages_17 <= _idxPages_T[2:0]; // @[BTB.scala:200:21, :266:{21,38}]
tgts_17 <= _tgts_T; // @[BTB.scala:201:17, :265:33]
tgtPages_17 <= tgtPageUpdate; // @[OneHot.scala:32:10]
cfiType_17 <= r_btb_update_bits_cfiType; // @[Valid.scala:135:21]
brIdx_17 <= _brIdx_T[0]; // @[BTB.scala:209:18, :271:{20,47}]
end
if (r_btb_update_valid & waddr == 5'h12) begin // @[Valid.scala:135:21]
idxs_18 <= _idxs_T; // @[BTB.scala:199:17, :264:40]
idxPages_18 <= _idxPages_T[2:0]; // @[BTB.scala:200:21, :266:{21,38}]
tgts_18 <= _tgts_T; // @[BTB.scala:201:17, :265:33]
tgtPages_18 <= tgtPageUpdate; // @[OneHot.scala:32:10]
cfiType_18 <= r_btb_update_bits_cfiType; // @[Valid.scala:135:21]
brIdx_18 <= _brIdx_T[0]; // @[BTB.scala:209:18, :271:{20,47}]
end
if (r_btb_update_valid & waddr == 5'h13) begin // @[Valid.scala:135:21]
idxs_19 <= _idxs_T; // @[BTB.scala:199:17, :264:40]
idxPages_19 <= _idxPages_T[2:0]; // @[BTB.scala:200:21, :266:{21,38}]
tgts_19 <= _tgts_T; // @[BTB.scala:201:17, :265:33]
tgtPages_19 <= tgtPageUpdate; // @[OneHot.scala:32:10]
cfiType_19 <= r_btb_update_bits_cfiType; // @[Valid.scala:135:21]
brIdx_19 <= _brIdx_T[0]; // @[BTB.scala:209:18, :271:{20,47}]
end
if (r_btb_update_valid & waddr == 5'h14) begin // @[Valid.scala:135:21]
idxs_20 <= _idxs_T; // @[BTB.scala:199:17, :264:40]
idxPages_20 <= _idxPages_T[2:0]; // @[BTB.scala:200:21, :266:{21,38}]
tgts_20 <= _tgts_T; // @[BTB.scala:201:17, :265:33]
tgtPages_20 <= tgtPageUpdate; // @[OneHot.scala:32:10]
cfiType_20 <= r_btb_update_bits_cfiType; // @[Valid.scala:135:21]
brIdx_20 <= _brIdx_T[0]; // @[BTB.scala:209:18, :271:{20,47}]
end
if (r_btb_update_valid & waddr == 5'h15) begin // @[Valid.scala:135:21]
idxs_21 <= _idxs_T; // @[BTB.scala:199:17, :264:40]
idxPages_21 <= _idxPages_T[2:0]; // @[BTB.scala:200:21, :266:{21,38}]
tgts_21 <= _tgts_T; // @[BTB.scala:201:17, :265:33]
tgtPages_21 <= tgtPageUpdate; // @[OneHot.scala:32:10]
cfiType_21 <= r_btb_update_bits_cfiType; // @[Valid.scala:135:21]
brIdx_21 <= _brIdx_T[0]; // @[BTB.scala:209:18, :271:{20,47}]
end
if (r_btb_update_valid & waddr == 5'h16) begin // @[Valid.scala:135:21]
idxs_22 <= _idxs_T; // @[BTB.scala:199:17, :264:40]
idxPages_22 <= _idxPages_T[2:0]; // @[BTB.scala:200:21, :266:{21,38}]
tgts_22 <= _tgts_T; // @[BTB.scala:201:17, :265:33]
tgtPages_22 <= tgtPageUpdate; // @[OneHot.scala:32:10]
cfiType_22 <= r_btb_update_bits_cfiType; // @[Valid.scala:135:21]
brIdx_22 <= _brIdx_T[0]; // @[BTB.scala:209:18, :271:{20,47}]
end
if (r_btb_update_valid & waddr == 5'h17) begin // @[Valid.scala:135:21]
idxs_23 <= _idxs_T; // @[BTB.scala:199:17, :264:40]
idxPages_23 <= _idxPages_T[2:0]; // @[BTB.scala:200:21, :266:{21,38}]
tgts_23 <= _tgts_T; // @[BTB.scala:201:17, :265:33]
tgtPages_23 <= tgtPageUpdate; // @[OneHot.scala:32:10]
cfiType_23 <= r_btb_update_bits_cfiType; // @[Valid.scala:135:21]
brIdx_23 <= _brIdx_T[0]; // @[BTB.scala:209:18, :271:{20,47}]
end
if (r_btb_update_valid & waddr == 5'h18) begin // @[Valid.scala:135:21]
idxs_24 <= _idxs_T; // @[BTB.scala:199:17, :264:40]
idxPages_24 <= _idxPages_T[2:0]; // @[BTB.scala:200:21, :266:{21,38}]
tgts_24 <= _tgts_T; // @[BTB.scala:201:17, :265:33]
tgtPages_24 <= tgtPageUpdate; // @[OneHot.scala:32:10]
cfiType_24 <= r_btb_update_bits_cfiType; // @[Valid.scala:135:21]
brIdx_24 <= _brIdx_T[0]; // @[BTB.scala:209:18, :271:{20,47}]
end
if (r_btb_update_valid & waddr == 5'h19) begin // @[Valid.scala:135:21]
idxs_25 <= _idxs_T; // @[BTB.scala:199:17, :264:40]
idxPages_25 <= _idxPages_T[2:0]; // @[BTB.scala:200:21, :266:{21,38}]
tgts_25 <= _tgts_T; // @[BTB.scala:201:17, :265:33]
tgtPages_25 <= tgtPageUpdate; // @[OneHot.scala:32:10]
cfiType_25 <= r_btb_update_bits_cfiType; // @[Valid.scala:135:21]
brIdx_25 <= _brIdx_T[0]; // @[BTB.scala:209:18, :271:{20,47}]
end
if (r_btb_update_valid & waddr == 5'h1A) begin // @[Valid.scala:135:21]
idxs_26 <= _idxs_T; // @[BTB.scala:199:17, :264:40]
idxPages_26 <= _idxPages_T[2:0]; // @[BTB.scala:200:21, :266:{21,38}]
tgts_26 <= _tgts_T; // @[BTB.scala:201:17, :265:33]
tgtPages_26 <= tgtPageUpdate; // @[OneHot.scala:32:10]
cfiType_26 <= r_btb_update_bits_cfiType; // @[Valid.scala:135:21]
brIdx_26 <= _brIdx_T[0]; // @[BTB.scala:209:18, :271:{20,47}]
end
if (r_btb_update_valid & waddr == 5'h1B) begin // @[Valid.scala:135:21]
idxs_27 <= _idxs_T; // @[BTB.scala:199:17, :264:40]
idxPages_27 <= _idxPages_T[2:0]; // @[BTB.scala:200:21, :266:{21,38}]
tgts_27 <= _tgts_T; // @[BTB.scala:201:17, :265:33]
tgtPages_27 <= tgtPageUpdate; // @[OneHot.scala:32:10]
cfiType_27 <= r_btb_update_bits_cfiType; // @[Valid.scala:135:21]
brIdx_27 <= _brIdx_T[0]; // @[BTB.scala:209:18, :271:{20,47}]
end
if (r_btb_update_valid & _T_5[0]) // @[Valid.scala:135:21]
pages_0 <= _T_8; // @[BTB.scala:203:18, :281:10]
if (r_btb_update_valid & _T_12[0]) // @[Valid.scala:135:21]
pages_1 <= _T_15; // @[BTB.scala:203:18, :283:10]
if (r_btb_update_valid & _T_5[2]) // @[Valid.scala:135:21]
pages_2 <= _T_8; // @[BTB.scala:203:18, :281:10]
if (r_btb_update_valid & _T_12[2]) // @[Valid.scala:135:21]
pages_3 <= _T_15; // @[BTB.scala:203:18, :283:10]
if (r_btb_update_valid & _T_5[4]) // @[Valid.scala:135:21]
pages_4 <= _T_8; // @[BTB.scala:203:18, :281:10]
if (r_btb_update_valid & _T_12[4]) // @[Valid.scala:135:21]
pages_5 <= _T_15; // @[BTB.scala:203:18, :283:10]
if (io_btb_update_valid_0) begin // @[BTB.scala:187:7]
r_btb_update_pipe_b_prediction_cfiType <= io_btb_update_bits_prediction_cfiType_0; // @[Valid.scala:142:26]
r_btb_update_pipe_b_prediction_taken <= io_btb_update_bits_prediction_taken_0; // @[Valid.scala:142:26]
r_btb_update_pipe_b_prediction_mask <= io_btb_update_bits_prediction_mask_0; // @[Valid.scala:142:26]
r_btb_update_pipe_b_prediction_bridx <= io_btb_update_bits_prediction_bridx_0; // @[Valid.scala:142:26]
r_btb_update_pipe_b_prediction_target <= io_btb_update_bits_prediction_target_0; // @[Valid.scala:142:26]
r_btb_update_pipe_b_prediction_entry <= io_btb_update_bits_prediction_entry_0; // @[Valid.scala:142:26]
r_btb_update_pipe_b_prediction_bht_history <= io_btb_update_bits_prediction_bht_history_0; // @[Valid.scala:142:26]
r_btb_update_pipe_b_prediction_bht_value <= io_btb_update_bits_prediction_bht_value_0; // @[Valid.scala:142:26]
r_btb_update_pipe_b_pc <= io_btb_update_bits_pc_0; // @[Valid.scala:142:26]
r_btb_update_pipe_b_target <= io_btb_update_bits_target_0; // @[Valid.scala:142:26]
r_btb_update_pipe_b_isValid <= io_btb_update_bits_isValid_0; // @[Valid.scala:142:26]
r_btb_update_pipe_b_br_pc <= io_btb_update_bits_br_pc_0; // @[Valid.scala:142:26]
r_btb_update_pipe_b_cfiType <= io_btb_update_bits_cfiType_0; // @[Valid.scala:142:26]
end
if (io_resp_valid_0) begin // @[BTB.scala:187:7]
r_resp_pipe_b_cfiType <= io_resp_bits_cfiType_0; // @[Valid.scala:142:26]
r_resp_pipe_b_taken <= io_resp_bits_taken_0; // @[Valid.scala:142:26]
r_resp_pipe_b_mask <= io_resp_bits_mask_0; // @[Valid.scala:142:26]
r_resp_pipe_b_bridx <= io_resp_bits_bridx_0; // @[Valid.scala:142:26]
r_resp_pipe_b_target <= io_resp_bits_target_0; // @[Valid.scala:142:26]
r_resp_pipe_b_entry <= io_resp_bits_entry_0; // @[Valid.scala:142:26]
r_resp_pipe_b_bht_history <= io_resp_bits_bht_history_0; // @[Valid.scala:142:26]
r_resp_pipe_b_bht_value <= io_resp_bits_bht_value_0; // @[Valid.scala:142:26]
end
if (io_ras_update_valid_0 & _T_139 & nextPos == 3'h0) // @[BTB.scala:44:22, :45:20, :51:40, :58:26, :187:7, :332:32, :333:{40,58}]
stack_0 <= io_ras_update_bits_returnAddr_0; // @[BTB.scala:58:26, :187:7]
if (io_ras_update_valid_0 & _T_139 & nextPos == 3'h1) // @[package.scala:39:86]
stack_1 <= io_ras_update_bits_returnAddr_0; // @[BTB.scala:58:26, :187:7]
if (io_ras_update_valid_0 & _T_139 & nextPos == 3'h2) // @[package.scala:39:86]
stack_2 <= io_ras_update_bits_returnAddr_0; // @[BTB.scala:58:26, :187:7]
if (io_ras_update_valid_0 & _T_139 & nextPos == 3'h3) // @[package.scala:39:86]
stack_3 <= io_ras_update_bits_returnAddr_0; // @[BTB.scala:58:26, :187:7]
if (io_ras_update_valid_0 & _T_139 & nextPos == 3'h4) // @[BTB.scala:44:22, :45:20, :58:26, :187:7, :332:32, :333:{40,58}]
stack_4 <= io_ras_update_bits_returnAddr_0; // @[BTB.scala:58:26, :187:7]
if (io_ras_update_valid_0 & _T_139 & nextPos == 3'h5) // @[BTB.scala:44:22, :45:20, :58:26, :187:7, :332:32, :333:{40,58}]
stack_5 <= io_ras_update_bits_returnAddr_0; // @[BTB.scala:58:26, :187:7]
if (reset) begin // @[BTB.scala:187:7]
pageValid <= 6'h0; // @[BTB.scala:204:26]
isValid <= 28'h0; // @[BTB.scala:207:24]
r_btb_update_pipe_v <= 1'h0; // @[Valid.scala:141:24]
nextPageRepl <= 3'h0; // @[BTB.scala:51:40, :237:29]
state_reg <= 27'h0; // @[Replacement.scala:168:70]
r_resp_pipe_v <= 1'h0; // @[Valid.scala:141:24]
history <= 8'h0; // @[BTB.scala:117:24]
reset_waddr <= 10'h0; // @[BTB.scala:119:36]
count <= 3'h0; // @[BTB.scala:51:40, :56:30]
pos <= 3'h0; // @[BTB.scala:51:40, :57:28]
end
else begin // @[BTB.scala:187:7]
if (r_btb_update_valid) // @[Valid.scala:135:21]
pageValid <= _pageValid_T_1[5:0]; // @[BTB.scala:204:26, :284:{15,44}]
if (io_flush_0) // @[BTB.scala:187:7]
isValid <= 28'h0; // @[BTB.scala:207:24]
else if (leftTwo_5 | rightTwo_14 | leftOne_13 & rightOne_26) // @[Misc.scala:183:{16,37,49,61}]
isValid <= _isValid_T_5; // @[BTB.scala:207:24, :297:24]
else if (r_btb_update_valid) // @[Valid.scala:135:21]
isValid <= _isValid_T_3[27:0]; // @[BTB.scala:207:24, :269:{13,19}]
r_btb_update_pipe_v <= io_btb_update_valid_0; // @[Valid.scala:141:24]
if (r_btb_update_valid & (doIdxPageRepl | doTgtPageRepl)) // @[Valid.scala:135:21]
nextPageRepl <= _nextPageRepl_T_2; // @[BTB.scala:237:29, :252:24]
if (r_resp_valid & r_resp_bits_taken | r_btb_update_valid) // @[Valid.scala:135:21]
state_reg <= _state_reg_T_92; // @[Replacement.scala:168:70, :202:12]
r_resp_pipe_v <= io_resp_valid_0; // @[Valid.scala:141:24]
if (io_bht_update_valid_0 & io_bht_update_bits_mispredict_0) // @[BTB.scala:187:7, :307:33, :310:32, :311:40]
history <= io_bht_update_bits_branch_0 ? _history_T_3 : io_bht_update_bits_prediction_history_0; // @[BTB.scala:107:13, :110:{13,19}, :117:24, :187:7, :307:33, :313:46, :316:50]
else if (io_bht_advance_valid_0) // @[BTB.scala:187:7]
history <= _history_T_1; // @[BTB.scala:113:19, :117:24]
if (resetting) // @[BTB.scala:120:27]
reset_waddr <= _reset_waddr_T_1; // @[BTB.scala:119:36, :124:49]
if (io_ras_update_valid_0) begin // @[BTB.scala:187:7]
if (_T_139) begin // @[BTB.scala:333:40]
if (count[2:1] != 2'h3) // @[BTB.scala:43:17, :56:30]
count <= _count_T_1; // @[BTB.scala:43:44, :56:30]
pos <= nextPos; // @[BTB.scala:44:22, :57:28]
end
else if ((&io_ras_update_bits_cfiType_0) & (|count)) begin // @[BTB.scala:49:37, :50:11, :54:29, :56:30, :187:7, :335:{46,63}]
count <= _count_T_3; // @[BTB.scala:50:20, :56:30]
pos <= _pos_T_4; // @[BTB.scala:51:15, :57:28]
end
end
end
  end // always @(posedge)
table_512x1 table_ext ( // @[BTB.scala:116:26]
.R0_addr (_res_res_value_T_7), // @[BTB.scala:88:20]
.R0_en (1'h1), // @[BTB.scala:187:7]
.R0_clk (clock),
.R0_data (_table_ext_R0_data),
.W0_addr (waddr_1[8:0]), // @[BTB.scala:122:31, :125:21]
.W0_en (wen), // @[BTB.scala:121:29]
.W0_clk (clock),
.W0_data (wdata) // @[BTB.scala:123:31]
); // @[BTB.scala:116:26]
assign io_resp_valid = io_resp_valid_0; // @[BTB.scala:187:7]
assign io_resp_bits_cfiType = io_resp_bits_cfiType_0; // @[BTB.scala:187:7]
assign io_resp_bits_taken = io_resp_bits_taken_0; // @[BTB.scala:187:7]
assign io_resp_bits_mask = io_resp_bits_mask_0; // @[BTB.scala:187:7]
assign io_resp_bits_bridx = io_resp_bits_bridx_0; // @[BTB.scala:187:7]
assign io_resp_bits_target = io_resp_bits_target_0; // @[BTB.scala:187:7]
assign io_resp_bits_entry = io_resp_bits_entry_0; // @[BTB.scala:187:7]
assign io_resp_bits_bht_history = io_resp_bits_bht_history_0; // @[BTB.scala:187:7]
assign io_resp_bits_bht_value = io_resp_bits_bht_value_0; // @[BTB.scala:187:7]
assign io_ras_head_valid = io_ras_head_valid_0; // @[BTB.scala:187:7]
assign io_ras_head_bits = io_ras_head_bits_0; // @[BTB.scala:187:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File fp-pipeline.scala:
//******************************************************************************
// Copyright (c) 2015 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Floating Point Datapath Pipeline
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v3.exu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.rocket
import freechips.rocketchip.tile
import boom.v3.exu.FUConstants._
import boom.v3.common._
import boom.v3.util.{BoomCoreStringPrefix}
/**
* Top level datapath that wraps the floating point issue window, regfile, and arithmetic units.
*/
class FpPipeline(implicit p: Parameters) extends BoomModule with tile.HasFPUParameters
{
val fpIssueParams = issueParams.find(_.iqType == IQT_FP.litValue).get
val dispatchWidth = fpIssueParams.dispatchWidth
val numLlPorts = memWidth
val numWakeupPorts = fpIssueParams.issueWidth + numLlPorts
val fpPregSz = log2Ceil(numFpPhysRegs)
val io = IO(new Bundle {
val brupdate = Input(new BrUpdateInfo())
val flush_pipeline = Input(Bool())
val fcsr_rm = Input(UInt(width=freechips.rocketchip.tile.FPConstants.RM_SZ.W))
val status = Input(new freechips.rocketchip.rocket.MStatus())
val dis_uops = Vec(dispatchWidth, Flipped(Decoupled(new MicroOp)))
// +1 for recoding.
val ll_wports = Flipped(Vec(memWidth, Decoupled(new ExeUnitResp(fLen+1))))// from memory unit
val from_int = Flipped(Decoupled(new ExeUnitResp(fLen+1)))// from integer RF
val to_sdq = Decoupled(new ExeUnitResp(fLen)) // to Load/Store Unit
val to_int = Decoupled(new ExeUnitResp(xLen)) // to integer RF
val wakeups = Vec(numWakeupPorts, Valid(new ExeUnitResp(fLen+1)))
val wb_valids = Input(Vec(numWakeupPorts, Bool()))
val wb_pdsts = Input(Vec(numWakeupPorts, UInt(width=fpPregSz.W)))
val debug_tsc_reg = Input(UInt(width=xLen.W))
val debug_wb_wdata = Output(Vec(numWakeupPorts, UInt((fLen+1).W)))
})
//**********************************
// construct all of the modules
val exe_units = new boom.v3.exu.ExecutionUnits(fpu=true)
val issue_unit = Module(new IssueUnitCollapsing(
issueParams.find(_.iqType == IQT_FP.litValue).get,
numWakeupPorts))
issue_unit.suggestName("fp_issue_unit")
val fregfile = Module(new RegisterFileSynthesizable(numFpPhysRegs,
exe_units.numFrfReadPorts,
exe_units.numFrfWritePorts + memWidth,
fLen+1,
// No bypassing for any FP units, + memWidth for ll_wb
Seq.fill(exe_units.numFrfWritePorts + memWidth){ false }
))
val fregister_read = Module(new RegisterRead(
issue_unit.issueWidth,
exe_units.withFilter(_.readsFrf).map(_.supportedFuncUnits).toSeq,
exe_units.numFrfReadPorts,
exe_units.withFilter(_.readsFrf).map(x => 3).toSeq,
0, // No bypass for FP
0,
fLen+1))
require (exe_units.count(_.readsFrf) == issue_unit.issueWidth)
require (exe_units.numFrfWritePorts + numLlPorts == numWakeupPorts)
//*************************************************************
// Issue window logic
val iss_valids = Wire(Vec(exe_units.numFrfReaders, Bool()))
val iss_uops = Wire(Vec(exe_units.numFrfReaders, new MicroOp()))
issue_unit.io.tsc_reg := io.debug_tsc_reg
issue_unit.io.brupdate := io.brupdate
issue_unit.io.flush_pipeline := io.flush_pipeline
// Don't support ld-hit speculation to FP window.
for (w <- 0 until memWidth) {
issue_unit.io.spec_ld_wakeup(w).valid := false.B
issue_unit.io.spec_ld_wakeup(w).bits := 0.U
}
issue_unit.io.ld_miss := false.B
require (exe_units.numTotalBypassPorts == 0)
//-------------------------------------------------------------
// **** Dispatch Stage ****
//-------------------------------------------------------------
// Input (Dispatch)
for (w <- 0 until dispatchWidth) {
issue_unit.io.dis_uops(w) <> io.dis_uops(w)
}
//-------------------------------------------------------------
// **** Issue Stage ****
//-------------------------------------------------------------
// Output (Issue)
for (i <- 0 until issue_unit.issueWidth) {
iss_valids(i) := issue_unit.io.iss_valids(i)
iss_uops(i) := issue_unit.io.iss_uops(i)
var fu_types = exe_units(i).io.fu_types
if (exe_units(i).supportedFuncUnits.fdiv) {
val fdiv_issued = iss_valids(i) && iss_uops(i).fu_code_is(FU_FDV)
fu_types = fu_types & RegNext(~Mux(fdiv_issued, FU_FDV, 0.U))
}
issue_unit.io.fu_types(i) := fu_types
require (exe_units(i).readsFrf)
}
// Wakeup
for ((writeback, issue_wakeup) <- io.wakeups zip issue_unit.io.wakeup_ports) {
issue_wakeup.valid := writeback.valid
issue_wakeup.bits.pdst := writeback.bits.uop.pdst
issue_wakeup.bits.poisoned := false.B
}
issue_unit.io.pred_wakeup_port.valid := false.B
issue_unit.io.pred_wakeup_port.bits := DontCare
//-------------------------------------------------------------
// **** Register Read Stage ****
//-------------------------------------------------------------
// Register Read <- Issue (rrd <- iss)
fregister_read.io.rf_read_ports <> fregfile.io.read_ports
fregister_read.io.prf_read_ports map { port => port.data := false.B }
fregister_read.io.iss_valids <> iss_valids
fregister_read.io.iss_uops := iss_uops
fregister_read.io.brupdate := io.brupdate
fregister_read.io.kill := io.flush_pipeline
//-------------------------------------------------------------
// **** Execute Stage ****
//-------------------------------------------------------------
exe_units.map(_.io.brupdate := io.brupdate)
for ((ex,w) <- exe_units.withFilter(_.readsFrf).map(x=>x).zipWithIndex) {
ex.io.req <> fregister_read.io.exe_reqs(w)
require (!ex.bypassable)
}
require (exe_units.numTotalBypassPorts == 0)
//-------------------------------------------------------------
// **** Writeback Stage ****
//-------------------------------------------------------------
val ll_wbarb = Module(new Arbiter(new ExeUnitResp(fLen+1), 2))
// Hookup load writeback -- and recode FP values.
ll_wbarb.io.in(0) <> io.ll_wports(0)
ll_wbarb.io.in(0).bits.data := recode(io.ll_wports(0).bits.data,
io.ll_wports(0).bits.uop.mem_size =/= 2.U)
val ifpu_resp = io.from_int
ll_wbarb.io.in(1) <> ifpu_resp
// Cut up critical path by delaying the write by a cycle.
// Wakeup signal is sent on cycle S0, write is now delayed until end of S1,
// but Issue happens on S1 and RegRead doesn't happen until S2 so we're safe.
fregfile.io.write_ports(0) := RegNext(WritePort(ll_wbarb.io.out, fpregSz, fLen+1, RT_FLT))
assert (ll_wbarb.io.in(0).ready) // never backpressure the memory unit.
when (ifpu_resp.valid) { assert (ifpu_resp.bits.uop.rf_wen && ifpu_resp.bits.uop.dst_rtype === RT_FLT) }
var w_cnt = 1
for (i <- 1 until memWidth) {
fregfile.io.write_ports(w_cnt) := RegNext(WritePort(io.ll_wports(i), fpregSz, fLen+1, RT_FLT))
fregfile.io.write_ports(w_cnt).bits.data := RegNext(recode(io.ll_wports(i).bits.data,
io.ll_wports(i).bits.uop.mem_size =/= 2.U))
w_cnt += 1
}
for (eu <- exe_units) {
if (eu.writesFrf) {
fregfile.io.write_ports(w_cnt).valid := eu.io.fresp.valid && eu.io.fresp.bits.uop.rf_wen
fregfile.io.write_ports(w_cnt).bits.addr := eu.io.fresp.bits.uop.pdst
fregfile.io.write_ports(w_cnt).bits.data := eu.io.fresp.bits.data
eu.io.fresp.ready := true.B
when (eu.io.fresp.valid) {
assert(eu.io.fresp.ready, "No backpressuring the FPU")
assert(eu.io.fresp.bits.uop.rf_wen, "rf_wen must be high here")
assert(eu.io.fresp.bits.uop.dst_rtype === RT_FLT, "wb type must be FLT for fpu")
}
w_cnt += 1
}
}
require (w_cnt == fregfile.io.write_ports.length)
val fpiu_unit = exe_units.fpiu_unit
val fpiu_is_sdq = fpiu_unit.io.ll_iresp.bits.uop.uopc === uopSTA
io.to_int.valid := fpiu_unit.io.ll_iresp.fire && !fpiu_is_sdq
io.to_sdq.valid := fpiu_unit.io.ll_iresp.fire && fpiu_is_sdq
io.to_int.bits := fpiu_unit.io.ll_iresp.bits
io.to_sdq.bits := fpiu_unit.io.ll_iresp.bits
fpiu_unit.io.ll_iresp.ready := io.to_sdq.ready && io.to_int.ready
//-------------------------------------------------------------
//-------------------------------------------------------------
// **** Commit Stage ****
//-------------------------------------------------------------
//-------------------------------------------------------------
io.wakeups(0).valid := ll_wbarb.io.out.valid
io.wakeups(0).bits := ll_wbarb.io.out.bits
ll_wbarb.io.out.ready := true.B
w_cnt = 1
for (i <- 1 until memWidth) {
io.wakeups(w_cnt) := io.ll_wports(i)
io.wakeups(w_cnt).bits.data := recode(io.ll_wports(i).bits.data,
io.ll_wports(i).bits.uop.mem_size =/= 2.U)
w_cnt += 1
}
for (eu <- exe_units) {
if (eu.writesFrf) {
val exe_resp = eu.io.fresp
val wb_uop = eu.io.fresp.bits.uop
val wport = io.wakeups(w_cnt)
wport.valid := exe_resp.valid && wb_uop.dst_rtype === RT_FLT
wport.bits := exe_resp.bits
w_cnt += 1
assert(!(exe_resp.valid && wb_uop.uses_ldq))
assert(!(exe_resp.valid && wb_uop.uses_stq))
assert(!(exe_resp.valid && wb_uop.is_amo))
}
}
for ((wdata, wakeup) <- io.debug_wb_wdata zip io.wakeups) {
wdata := ieee(wakeup.bits.data)
}
exe_units.map(_.io.fcsr_rm := io.fcsr_rm)
exe_units.map(_.io.status := io.status)
//-------------------------------------------------------------
// **** Flush Pipeline ****
//-------------------------------------------------------------
  // flush on exceptions, mini-exceptions, and after some special instructions
for (w <- 0 until exe_units.length) {
exe_units(w).io.req.bits.kill := io.flush_pipeline
}
override def toString: String =
(BoomCoreStringPrefix("===FP Pipeline===") + "\n"
+ fregfile.toString
+ BoomCoreStringPrefix(
"Num Wakeup Ports : " + numWakeupPorts,
"Num Bypass Ports : " + exe_units.numTotalBypassPorts))
}
File FPU.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tile
import chisel3._
import chisel3.util._
import chisel3.{DontCare, WireInit, withClock, withReset}
import chisel3.experimental.SourceInfo
import chisel3.experimental.dataview._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.rocket._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.util._
import freechips.rocketchip.util.property
case class FPUParams(
minFLen: Int = 32,
fLen: Int = 64,
divSqrt: Boolean = true,
sfmaLatency: Int = 3,
dfmaLatency: Int = 4,
fpmuLatency: Int = 2,
ifpuLatency: Int = 2
)
object FPConstants
{
val RM_SZ = 3
val FLAGS_SZ = 5
}
trait HasFPUCtrlSigs {
val ldst = Bool()
val wen = Bool()
val ren1 = Bool()
val ren2 = Bool()
val ren3 = Bool()
val swap12 = Bool()
val swap23 = Bool()
val typeTagIn = UInt(2.W)
val typeTagOut = UInt(2.W)
val fromint = Bool()
val toint = Bool()
val fastpipe = Bool()
val fma = Bool()
val div = Bool()
val sqrt = Bool()
val wflags = Bool()
val vec = Bool()
}
class FPUCtrlSigs extends Bundle with HasFPUCtrlSigs
class FPUDecoder(implicit p: Parameters) extends FPUModule()(p) {
val io = IO(new Bundle {
val inst = Input(Bits(32.W))
val sigs = Output(new FPUCtrlSigs())
})
private val X2 = BitPat.dontCare(2)
val default = List(X,X,X,X,X,X,X,X2,X2,X,X,X,X,X,X,X,N)
val h: Array[(BitPat, List[BitPat])] =
Array(FLH -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N),
FSH -> List(Y,N,N,Y,N,Y,X, I, H,N,Y,N,N,N,N,N,N),
FMV_H_X -> List(N,Y,N,N,N,X,X, H, I,Y,N,N,N,N,N,N,N),
FCVT_H_W -> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
FCVT_H_WU-> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
FCVT_H_L -> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
FCVT_H_LU-> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
FMV_X_H -> List(N,N,Y,N,N,N,X, I, H,N,Y,N,N,N,N,N,N),
FCLASS_H -> List(N,N,Y,N,N,N,X, H, H,N,Y,N,N,N,N,N,N),
FCVT_W_H -> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
FCVT_WU_H-> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
FCVT_L_H -> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
FCVT_LU_H-> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
FCVT_S_H -> List(N,Y,Y,N,N,N,X, H, S,N,N,Y,N,N,N,Y,N),
FCVT_H_S -> List(N,Y,Y,N,N,N,X, S, H,N,N,Y,N,N,N,Y,N),
FEQ_H -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N),
FLT_H -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N),
FLE_H -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N),
FSGNJ_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N),
FSGNJN_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N),
FSGNJX_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N),
FMIN_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,Y,N),
FMAX_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,Y,N),
FADD_H -> List(N,Y,Y,Y,N,N,Y, H, H,N,N,N,Y,N,N,Y,N),
FSUB_H -> List(N,Y,Y,Y,N,N,Y, H, H,N,N,N,Y,N,N,Y,N),
FMUL_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,N,Y,N,N,Y,N),
FMADD_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
FMSUB_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
FNMADD_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
FNMSUB_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
FDIV_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,N,N,Y,N,Y,N),
FSQRT_H -> List(N,Y,Y,N,N,N,X, H, H,N,N,N,N,N,Y,Y,N))
val f: Array[(BitPat, List[BitPat])] =
Array(FLW -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N),
FSW -> List(Y,N,N,Y,N,Y,X, I, S,N,Y,N,N,N,N,N,N),
FMV_W_X -> List(N,Y,N,N,N,X,X, S, I,Y,N,N,N,N,N,N,N),
FCVT_S_W -> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
FCVT_S_WU-> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
FCVT_S_L -> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
FCVT_S_LU-> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
FMV_X_W -> List(N,N,Y,N,N,N,X, I, S,N,Y,N,N,N,N,N,N),
FCLASS_S -> List(N,N,Y,N,N,N,X, S, S,N,Y,N,N,N,N,N,N),
FCVT_W_S -> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
FCVT_WU_S-> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
FCVT_L_S -> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
FCVT_LU_S-> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
FEQ_S -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N),
FLT_S -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N),
FLE_S -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N),
FSGNJ_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N),
FSGNJN_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N),
FSGNJX_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N),
FMIN_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,Y,N),
FMAX_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,Y,N),
FADD_S -> List(N,Y,Y,Y,N,N,Y, S, S,N,N,N,Y,N,N,Y,N),
FSUB_S -> List(N,Y,Y,Y,N,N,Y, S, S,N,N,N,Y,N,N,Y,N),
FMUL_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,N,Y,N,N,Y,N),
FMADD_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
FMSUB_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
FNMADD_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
FNMSUB_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
FDIV_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,N,N,Y,N,Y,N),
FSQRT_S -> List(N,Y,Y,N,N,N,X, S, S,N,N,N,N,N,Y,Y,N))
val d: Array[(BitPat, List[BitPat])] =
Array(FLD -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N),
FSD -> List(Y,N,N,Y,N,Y,X, I, D,N,Y,N,N,N,N,N,N),
FMV_D_X -> List(N,Y,N,N,N,X,X, D, I,Y,N,N,N,N,N,N,N),
FCVT_D_W -> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
FCVT_D_WU-> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
FCVT_D_L -> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
FCVT_D_LU-> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
FMV_X_D -> List(N,N,Y,N,N,N,X, I, D,N,Y,N,N,N,N,N,N),
FCLASS_D -> List(N,N,Y,N,N,N,X, D, D,N,Y,N,N,N,N,N,N),
FCVT_W_D -> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
FCVT_WU_D-> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
FCVT_L_D -> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
FCVT_LU_D-> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
FCVT_S_D -> List(N,Y,Y,N,N,N,X, D, S,N,N,Y,N,N,N,Y,N),
FCVT_D_S -> List(N,Y,Y,N,N,N,X, S, D,N,N,Y,N,N,N,Y,N),
FEQ_D -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N),
FLT_D -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N),
FLE_D -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N),
FSGNJ_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N),
FSGNJN_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N),
FSGNJX_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N),
FMIN_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,Y,N),
FMAX_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,Y,N),
FADD_D -> List(N,Y,Y,Y,N,N,Y, D, D,N,N,N,Y,N,N,Y,N),
FSUB_D -> List(N,Y,Y,Y,N,N,Y, D, D,N,N,N,Y,N,N,Y,N),
FMUL_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,N,Y,N,N,Y,N),
FMADD_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
FMSUB_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
FNMADD_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
FNMSUB_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
FDIV_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,N,N,Y,N,Y,N),
FSQRT_D -> List(N,Y,Y,N,N,N,X, D, D,N,N,N,N,N,Y,Y,N))
val fcvt_hd: Array[(BitPat, List[BitPat])] =
Array(FCVT_H_D -> List(N,Y,Y,N,N,N,X, D, H,N,N,Y,N,N,N,Y,N),
FCVT_D_H -> List(N,Y,Y,N,N,N,X, H, D,N,N,Y,N,N,N,Y,N))
val vfmv_f_s: Array[(BitPat, List[BitPat])] =
Array(VFMV_F_S -> List(N,Y,N,N,N,N,X,X2,X2,N,N,N,N,N,N,N,Y))
val insns = ((minFLen, fLen) match {
case (32, 32) => f
case (16, 32) => h ++ f
case (32, 64) => f ++ d
case (16, 64) => h ++ f ++ d ++ fcvt_hd
case other => throw new Exception(s"minFLen = ${minFLen} & fLen = ${fLen} is an unsupported configuration")
}) ++ (if (usingVector) vfmv_f_s else Array[(BitPat, List[BitPat])]())
val decoder = DecodeLogic(io.inst, default, insns)
val s = io.sigs
val sigs = Seq(s.ldst, s.wen, s.ren1, s.ren2, s.ren3, s.swap12,
s.swap23, s.typeTagIn, s.typeTagOut, s.fromint, s.toint,
s.fastpipe, s.fma, s.div, s.sqrt, s.wflags, s.vec)
sigs zip decoder map {case(s,d) => s := d}
}
class FPUCoreIO(implicit p: Parameters) extends CoreBundle()(p) {
val hartid = Input(UInt(hartIdLen.W))
val time = Input(UInt(xLen.W))
val inst = Input(Bits(32.W))
val fromint_data = Input(Bits(xLen.W))
val fcsr_rm = Input(Bits(FPConstants.RM_SZ.W))
val fcsr_flags = Valid(Bits(FPConstants.FLAGS_SZ.W))
val v_sew = Input(UInt(3.W))
val store_data = Output(Bits(fLen.W))
val toint_data = Output(Bits(xLen.W))
val ll_resp_val = Input(Bool())
val ll_resp_type = Input(Bits(3.W))
val ll_resp_tag = Input(UInt(5.W))
val ll_resp_data = Input(Bits(fLen.W))
val valid = Input(Bool())
val fcsr_rdy = Output(Bool())
val nack_mem = Output(Bool())
val illegal_rm = Output(Bool())
val killx = Input(Bool())
val killm = Input(Bool())
val dec = Output(new FPUCtrlSigs())
val sboard_set = Output(Bool())
val sboard_clr = Output(Bool())
val sboard_clra = Output(UInt(5.W))
val keep_clock_enabled = Input(Bool())
}
class FPUIO(implicit p: Parameters) extends FPUCoreIO ()(p) {
  val cp_req = Flipped(Decoupled(new FPInput())) // cp requests don't pay attention to kill signals
val cp_resp = Decoupled(new FPResult())
}
class FPResult(implicit p: Parameters) extends CoreBundle()(p) {
val data = Bits((fLen+1).W)
val exc = Bits(FPConstants.FLAGS_SZ.W)
}
class IntToFPInput(implicit p: Parameters) extends CoreBundle()(p) with HasFPUCtrlSigs {
val rm = Bits(FPConstants.RM_SZ.W)
val typ = Bits(2.W)
val in1 = Bits(xLen.W)
}
class FPInput(implicit p: Parameters) extends CoreBundle()(p) with HasFPUCtrlSigs {
val rm = Bits(FPConstants.RM_SZ.W)
val fmaCmd = Bits(2.W)
val typ = Bits(2.W)
val fmt = Bits(2.W)
val in1 = Bits((fLen+1).W)
val in2 = Bits((fLen+1).W)
val in3 = Bits((fLen+1).W)
}
case class FType(exp: Int, sig: Int) {
def ieeeWidth = exp + sig
def recodedWidth = ieeeWidth + 1
def ieeeQNaN = ((BigInt(1) << (ieeeWidth - 1)) - (BigInt(1) << (sig - 2))).U(ieeeWidth.W)
def qNaN = ((BigInt(7) << (exp + sig - 3)) + (BigInt(1) << (sig - 2))).U(recodedWidth.W)
def isNaN(x: UInt) = x(sig + exp - 1, sig + exp - 3).andR
def isSNaN(x: UInt) = isNaN(x) && !x(sig - 2)
def classify(x: UInt) = {
val sign = x(sig + exp)
val code = x(exp + sig - 1, exp + sig - 3)
val codeHi = code(2, 1)
val isSpecial = codeHi === 3.U
val isHighSubnormalIn = x(exp + sig - 3, sig - 1) < 2.U
val isSubnormal = code === 1.U || codeHi === 1.U && isHighSubnormalIn
val isNormal = codeHi === 1.U && !isHighSubnormalIn || codeHi === 2.U
val isZero = code === 0.U
val isInf = isSpecial && !code(0)
val isNaN = code.andR
val isSNaN = isNaN && !x(sig-2)
val isQNaN = isNaN && x(sig-2)
Cat(isQNaN, isSNaN, isInf && !sign, isNormal && !sign,
isSubnormal && !sign, isZero && !sign, isZero && sign,
isSubnormal && sign, isNormal && sign, isInf && sign)
}
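  // The Cat above follows the RISC-V FCLASS bit ordering, MSB to LSB: quiet
  // NaN, signaling NaN, +inf, +normal, +subnormal, +0, -0, -subnormal,
  // -normal, -inf. For example, a recoded +0.0 input should classify to
  // 0b0000010000 (bit 4, the "+0" bit, set).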
// convert between formats, ignoring rounding, range, NaN
def unsafeConvert(x: UInt, to: FType) = if (this == to) x else {
val sign = x(sig + exp)
val fractIn = x(sig - 2, 0)
val expIn = x(sig + exp - 1, sig - 1)
val fractOut = fractIn << to.sig >> sig
val expOut = {
val expCode = expIn(exp, exp - 2)
val commonCase = (expIn + (1 << to.exp).U) - (1 << exp).U
Mux(expCode === 0.U || expCode >= 6.U, Cat(expCode, commonCase(to.exp - 3, 0)), commonCase(to.exp, 0))
}
Cat(sign, expOut, fractOut)
}
private def ieeeBundle = {
val expWidth = exp
class IEEEBundle extends Bundle {
val sign = Bool()
val exp = UInt(expWidth.W)
val sig = UInt((ieeeWidth-expWidth-1).W)
}
new IEEEBundle
}
def unpackIEEE(x: UInt) = x.asTypeOf(ieeeBundle)
def recode(x: UInt) = hardfloat.recFNFromFN(exp, sig, x)
def ieee(x: UInt) = hardfloat.fNFromRecFN(exp, sig, x)
}
object FType {
val H = new FType(5, 11)
val S = new FType(8, 24)
val D = new FType(11, 53)
val all = List(H, S, D)
}
trait HasFPUParameters {
require(fLen == 0 || FType.all.exists(_.ieeeWidth == fLen))
val minFLen: Int
val fLen: Int
def xLen: Int
val minXLen = 32
val nIntTypes = log2Ceil(xLen/minXLen) + 1
def floatTypes = FType.all.filter(t => minFLen <= t.ieeeWidth && t.ieeeWidth <= fLen)
def minType = floatTypes.head
def maxType = floatTypes.last
def prevType(t: FType) = floatTypes(typeTag(t) - 1)
def maxExpWidth = maxType.exp
def maxSigWidth = maxType.sig
def typeTag(t: FType) = floatTypes.indexOf(t)
def typeTagWbOffset = (FType.all.indexOf(minType) + 1).U
def typeTagGroup(t: FType) = (if (floatTypes.contains(t)) typeTag(t) else typeTag(maxType)).U
// typeTag
def H = typeTagGroup(FType.H)
def S = typeTagGroup(FType.S)
def D = typeTagGroup(FType.D)
def I = typeTag(maxType).U
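  // Worked example, assuming the common minFLen = 32 / fLen = 64 configuration:
  // floatTypes is List(S, D), so S = 0.U and D = 1.U. FType.H is filtered out of
  // floatTypes, so typeTagGroup(FType.H) falls back to typeTag(maxType) and H
  // aliases the widest tag (1.U here); I is likewise typeTag(maxType).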
private def isBox(x: UInt, t: FType): Bool = x(t.sig + t.exp, t.sig + t.exp - 4).andR
private def box(x: UInt, xt: FType, y: UInt, yt: FType): UInt = {
require(xt.ieeeWidth == 2 * yt.ieeeWidth)
val swizzledNaN = Cat(
x(xt.sig + xt.exp, xt.sig + xt.exp - 3),
x(xt.sig - 2, yt.recodedWidth - 1).andR,
x(xt.sig + xt.exp - 5, xt.sig),
y(yt.recodedWidth - 2),
x(xt.sig - 2, yt.recodedWidth - 1),
y(yt.recodedWidth - 1),
y(yt.recodedWidth - 3, 0))
Mux(xt.isNaN(x), swizzledNaN, x)
}
// implement NaN unboxing for FU inputs
def unbox(x: UInt, tag: UInt, exactType: Option[FType]): UInt = {
val outType = exactType.getOrElse(maxType)
def helper(x: UInt, t: FType): Seq[(Bool, UInt)] = {
val prev =
if (t == minType) {
Seq()
} else {
val prevT = prevType(t)
val unswizzled = Cat(
x(prevT.sig + prevT.exp - 1),
x(t.sig - 1),
x(prevT.sig + prevT.exp - 2, 0))
val prev = helper(unswizzled, prevT)
val isbox = isBox(x, t)
prev.map(p => (isbox && p._1, p._2))
}
prev :+ (true.B, t.unsafeConvert(x, outType))
}
val (oks, floats) = helper(x, maxType).unzip
if (exactType.isEmpty || floatTypes.size == 1) {
Mux(oks(tag), floats(tag), maxType.qNaN)
} else {
val t = exactType.get
floats(typeTag(t)) | Mux(oks(typeTag(t)), 0.U, t.qNaN)
}
}
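  // Unboxing sketch: a narrower value held in a wider register is treated as
  // valid only if it is NaN-boxed, i.e. isBox holds at every enclosing width.
  // When the box check fails, the mux above selects (or, on the exact-type
  // path, ORs in) the canonical qNaN instead, matching the RISC-V rule that an
  // improperly boxed narrow operand reads as a quiet NaN.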
// make sure that the redundant bits in the NaN-boxed encoding are consistent
def consistent(x: UInt): Bool = {
def helper(x: UInt, t: FType): Bool = if (typeTag(t) == 0) true.B else {
val prevT = prevType(t)
val unswizzled = Cat(
x(prevT.sig + prevT.exp - 1),
x(t.sig - 1),
x(prevT.sig + prevT.exp - 2, 0))
val prevOK = !isBox(x, t) || helper(unswizzled, prevT)
val curOK = !t.isNaN(x) || x(t.sig + t.exp - 4) === x(t.sig - 2, prevT.recodedWidth - 1).andR
prevOK && curOK
}
helper(x, maxType)
}
// generate a NaN box from an FU result
def box(x: UInt, t: FType): UInt = {
if (t == maxType) {
x
} else {
val nt = floatTypes(typeTag(t) + 1)
val bigger = box(((BigInt(1) << nt.recodedWidth)-1).U, nt, x, t)
bigger | ((BigInt(1) << maxType.recodedWidth) - (BigInt(1) << nt.recodedWidth)).U
}
}
// generate a NaN box from an FU result
def box(x: UInt, tag: UInt): UInt = {
val opts = floatTypes.map(t => box(x, t))
opts(tag)
}
// zap bits that hardfloat thinks are don't-cares, but we do care about
def sanitizeNaN(x: UInt, t: FType): UInt = {
if (typeTag(t) == 0) {
x
} else {
val maskedNaN = x & ~((BigInt(1) << (t.sig-1)) | (BigInt(1) << (t.sig+t.exp-4))).U(t.recodedWidth.W)
Mux(t.isNaN(x), maskedNaN, x)
}
}
// implement NaN boxing and recoding for FL*/fmv.*.x
def recode(x: UInt, tag: UInt): UInt = {
def helper(x: UInt, t: FType): UInt = {
if (typeTag(t) == 0) {
t.recode(x)
} else {
val prevT = prevType(t)
box(t.recode(x), t, helper(x, prevT), prevT)
}
}
// fill MSBs of subword loads to emulate a wider load of a NaN-boxed value
val boxes = floatTypes.map(t => ((BigInt(1) << maxType.ieeeWidth) - (BigInt(1) << t.ieeeWidth)).U)
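    // e.g. with fLen = 64 and a 32-bit load (tag selecting S), boxes(tag) is
    // 0xFFFFFFFF_00000000, so ORing it into the data reproduces what a 64-bit
    // load of a properly NaN-boxed single-precision value would have returned.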
helper(boxes(tag) | x, maxType)
}
// implement NaN unboxing and un-recoding for FS*/fmv.x.*
def ieee(x: UInt, t: FType = maxType): UInt = {
if (typeTag(t) == 0) {
t.ieee(x)
} else {
val unrecoded = t.ieee(x)
val prevT = prevType(t)
val prevRecoded = Cat(
x(prevT.recodedWidth-2),
x(t.sig-1),
x(prevT.recodedWidth-3, 0))
val prevUnrecoded = ieee(prevRecoded, prevT)
Cat(unrecoded >> prevT.ieeeWidth, Mux(t.isNaN(x), prevUnrecoded, unrecoded(prevT.ieeeWidth-1, 0)))
}
}
}
abstract class FPUModule(implicit val p: Parameters) extends Module with HasCoreParameters with HasFPUParameters
class FPToInt(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
class Output extends Bundle {
val in = new FPInput
val lt = Bool()
val store = Bits(fLen.W)
val toint = Bits(xLen.W)
val exc = Bits(FPConstants.FLAGS_SZ.W)
}
val io = IO(new Bundle {
val in = Flipped(Valid(new FPInput))
val out = Valid(new Output)
})
val in = RegEnable(io.in.bits, io.in.valid)
val valid = RegNext(io.in.valid)
val dcmp = Module(new hardfloat.CompareRecFN(maxExpWidth, maxSigWidth))
dcmp.io.a := in.in1
dcmp.io.b := in.in2
dcmp.io.signaling := !in.rm(1)
val tag = in.typeTagOut
val toint_ieee = (floatTypes.map(t => if (t == FType.H) Fill(maxType.ieeeWidth / minXLen, ieee(in.in1)(15, 0).sextTo(minXLen))
else Fill(maxType.ieeeWidth / t.ieeeWidth, ieee(in.in1)(t.ieeeWidth - 1, 0))): Seq[UInt])(tag)
val toint = WireDefault(toint_ieee)
val intType = WireDefault(in.fmt(0))
io.out.bits.store := (floatTypes.map(t => Fill(fLen / t.ieeeWidth, ieee(in.in1)(t.ieeeWidth - 1, 0))): Seq[UInt])(tag)
io.out.bits.toint := ((0 until nIntTypes).map(i => toint((minXLen << i) - 1, 0).sextTo(xLen)): Seq[UInt])(intType)
io.out.bits.exc := 0.U
when (in.rm(0)) {
val classify_out = (floatTypes.map(t => t.classify(maxType.unsafeConvert(in.in1, t))): Seq[UInt])(tag)
toint := classify_out | (toint_ieee >> minXLen << minXLen)
intType := false.B
}
when (in.wflags) { // feq/flt/fle, fcvt
toint := (~in.rm & Cat(dcmp.io.lt, dcmp.io.eq)).orR | (toint_ieee >> minXLen << minXLen)
io.out.bits.exc := dcmp.io.exceptionFlags
intType := false.B
when (!in.ren2) { // fcvt
val cvtType = in.typ.extract(log2Ceil(nIntTypes), 1)
intType := cvtType
val conv = Module(new hardfloat.RecFNToIN(maxExpWidth, maxSigWidth, xLen))
conv.io.in := in.in1
conv.io.roundingMode := in.rm
conv.io.signedOut := ~in.typ(0)
toint := conv.io.out
io.out.bits.exc := Cat(conv.io.intExceptionFlags(2, 1).orR, 0.U(3.W), conv.io.intExceptionFlags(0))
for (i <- 0 until nIntTypes-1) {
val w = minXLen << i
when (cvtType === i.U) {
val narrow = Module(new hardfloat.RecFNToIN(maxExpWidth, maxSigWidth, w))
narrow.io.in := in.in1
narrow.io.roundingMode := in.rm
narrow.io.signedOut := ~in.typ(0)
val excSign = in.in1(maxExpWidth + maxSigWidth) && !maxType.isNaN(in.in1)
val excOut = Cat(conv.io.signedOut === excSign, Fill(w-1, !excSign))
val invalid = conv.io.intExceptionFlags(2) || narrow.io.intExceptionFlags(1)
when (invalid) { toint := Cat(conv.io.out >> w, excOut) }
io.out.bits.exc := Cat(invalid, 0.U(3.W), !invalid && conv.io.intExceptionFlags(0))
}
}
}
}
io.out.valid := valid
io.out.bits.lt := dcmp.io.lt || (dcmp.io.a.asSInt < 0.S && dcmp.io.b.asSInt >= 0.S)
io.out.bits.in := in
}
class IntToFP(val latency: Int)(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
val io = IO(new Bundle {
val in = Flipped(Valid(new IntToFPInput))
val out = Valid(new FPResult)
})
val in = Pipe(io.in)
val tag = in.bits.typeTagIn
val mux = Wire(new FPResult)
mux.exc := 0.U
mux.data := recode(in.bits.in1, tag)
val intValue = {
val res = WireDefault(in.bits.in1.asSInt)
for (i <- 0 until nIntTypes-1) {
val smallInt = in.bits.in1((minXLen << i) - 1, 0)
when (in.bits.typ.extract(log2Ceil(nIntTypes), 1) === i.U) {
res := Mux(in.bits.typ(0), smallInt.zext, smallInt.asSInt)
}
}
res.asUInt
}
when (in.bits.wflags) { // fcvt
// could be improved for RVD/RVQ with a single variable-position rounding
// unit, rather than N fixed-position ones
val i2fResults = for (t <- floatTypes) yield {
val i2f = Module(new hardfloat.INToRecFN(xLen, t.exp, t.sig))
i2f.io.signedIn := ~in.bits.typ(0)
i2f.io.in := intValue
i2f.io.roundingMode := in.bits.rm
i2f.io.detectTininess := hardfloat.consts.tininess_afterRounding
(sanitizeNaN(i2f.io.out, t), i2f.io.exceptionFlags)
}
val (data, exc) = i2fResults.unzip
val dataPadded = data.init.map(d => Cat(data.last >> d.getWidth, d)) :+ data.last
mux.data := dataPadded(tag)
mux.exc := exc(tag)
}
io.out <> Pipe(in.valid, mux, latency-1)
}
class FPToFP(val latency: Int)(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
val io = IO(new Bundle {
val in = Flipped(Valid(new FPInput))
val out = Valid(new FPResult)
val lt = Input(Bool()) // from FPToInt
})
val in = Pipe(io.in)
val signNum = Mux(in.bits.rm(1), in.bits.in1 ^ in.bits.in2, Mux(in.bits.rm(0), ~in.bits.in2, in.bits.in2))
val fsgnj = Cat(signNum(fLen), in.bits.in1(fLen-1, 0))
val fsgnjMux = Wire(new FPResult)
fsgnjMux.exc := 0.U
fsgnjMux.data := fsgnj
when (in.bits.wflags) { // fmin/fmax
val isnan1 = maxType.isNaN(in.bits.in1)
val isnan2 = maxType.isNaN(in.bits.in2)
val isInvalid = maxType.isSNaN(in.bits.in1) || maxType.isSNaN(in.bits.in2)
val isNaNOut = isnan1 && isnan2
val isLHS = isnan2 || in.bits.rm(0) =/= io.lt && !isnan1
fsgnjMux.exc := isInvalid << 4
fsgnjMux.data := Mux(isNaNOut, maxType.qNaN, Mux(isLHS, in.bits.in1, in.bits.in2))
}
val inTag = in.bits.typeTagIn
val outTag = in.bits.typeTagOut
val mux = WireDefault(fsgnjMux)
for (t <- floatTypes.init) {
when (outTag === typeTag(t).U) {
mux.data := Cat(fsgnjMux.data >> t.recodedWidth, maxType.unsafeConvert(fsgnjMux.data, t))
}
}
when (in.bits.wflags && !in.bits.ren2) { // fcvt
if (floatTypes.size > 1) {
// widening conversions simply canonicalize NaN operands
val widened = Mux(maxType.isNaN(in.bits.in1), maxType.qNaN, in.bits.in1)
fsgnjMux.data := widened
fsgnjMux.exc := maxType.isSNaN(in.bits.in1) << 4
// narrowing conversions require rounding (for RVQ, this could be
// optimized to use a single variable-position rounding unit, rather
// than two fixed-position ones)
for (outType <- floatTypes.init) when (outTag === typeTag(outType).U && ((typeTag(outType) == 0).B || outTag < inTag)) {
val narrower = Module(new hardfloat.RecFNToRecFN(maxType.exp, maxType.sig, outType.exp, outType.sig))
narrower.io.in := in.bits.in1
narrower.io.roundingMode := in.bits.rm
narrower.io.detectTininess := hardfloat.consts.tininess_afterRounding
val narrowed = sanitizeNaN(narrower.io.out, outType)
mux.data := Cat(fsgnjMux.data >> narrowed.getWidth, narrowed)
mux.exc := narrower.io.exceptionFlags
}
}
}
io.out <> Pipe(in.valid, mux, latency-1)
}
class MulAddRecFNPipe(latency: Int, expWidth: Int, sigWidth: Int) extends Module
{
override def desiredName = s"MulAddRecFNPipe_l${latency}_e${expWidth}_s${sigWidth}"
require(latency<=2)
val io = IO(new Bundle {
val validin = Input(Bool())
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
val validout = Output(Bool())
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val mulAddRecFNToRaw_preMul = Module(new hardfloat.MulAddRecFNToRaw_preMul(expWidth, sigWidth))
val mulAddRecFNToRaw_postMul = Module(new hardfloat.MulAddRecFNToRaw_postMul(expWidth, sigWidth))
mulAddRecFNToRaw_preMul.io.op := io.op
mulAddRecFNToRaw_preMul.io.a := io.a
mulAddRecFNToRaw_preMul.io.b := io.b
mulAddRecFNToRaw_preMul.io.c := io.c
val mulAddResult =
(mulAddRecFNToRaw_preMul.io.mulAddA *
mulAddRecFNToRaw_preMul.io.mulAddB) +&
mulAddRecFNToRaw_preMul.io.mulAddC
val valid_stage0 = Wire(Bool())
val roundingMode_stage0 = Wire(UInt(3.W))
val detectTininess_stage0 = Wire(UInt(1.W))
val postmul_regs = if(latency>0) 1 else 0
mulAddRecFNToRaw_postMul.io.fromPreMul := Pipe(io.validin, mulAddRecFNToRaw_preMul.io.toPostMul, postmul_regs).bits
mulAddRecFNToRaw_postMul.io.mulAddResult := Pipe(io.validin, mulAddResult, postmul_regs).bits
mulAddRecFNToRaw_postMul.io.roundingMode := Pipe(io.validin, io.roundingMode, postmul_regs).bits
roundingMode_stage0 := Pipe(io.validin, io.roundingMode, postmul_regs).bits
detectTininess_stage0 := Pipe(io.validin, io.detectTininess, postmul_regs).bits
valid_stage0 := Pipe(io.validin, false.B, postmul_regs).valid
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundRawFNToRecFN = Module(new hardfloat.RoundRawFNToRecFN(expWidth, sigWidth, 0))
val round_regs = if(latency==2) 1 else 0
roundRawFNToRecFN.io.invalidExc := Pipe(valid_stage0, mulAddRecFNToRaw_postMul.io.invalidExc, round_regs).bits
roundRawFNToRecFN.io.in := Pipe(valid_stage0, mulAddRecFNToRaw_postMul.io.rawOut, round_regs).bits
roundRawFNToRecFN.io.roundingMode := Pipe(valid_stage0, roundingMode_stage0, round_regs).bits
roundRawFNToRecFN.io.detectTininess := Pipe(valid_stage0, detectTininess_stage0, round_regs).bits
io.validout := Pipe(valid_stage0, false.B, round_regs).valid
roundRawFNToRecFN.io.infiniteExc := false.B
io.out := roundRawFNToRecFN.io.out
io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
}
class FPUFMAPipe(val latency: Int, val t: FType)
(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
override def desiredName = s"FPUFMAPipe_l${latency}_f${t.ieeeWidth}"
require(latency>0)
val io = IO(new Bundle {
val in = Flipped(Valid(new FPInput))
val out = Valid(new FPResult)
})
val valid = RegNext(io.in.valid)
val in = Reg(new FPInput)
when (io.in.valid) {
val one = 1.U << (t.sig + t.exp - 1)
val zero = (io.in.bits.in1 ^ io.in.bits.in2) & (1.U << (t.sig + t.exp))
val cmd_fma = io.in.bits.ren3
val cmd_addsub = io.in.bits.swap23
in := io.in.bits
when (cmd_addsub) { in.in2 := one }
when (!(cmd_fma || cmd_addsub)) { in.in3 := zero }
}
val fma = Module(new MulAddRecFNPipe((latency-1) min 2, t.exp, t.sig))
fma.io.validin := valid
fma.io.op := in.fmaCmd
fma.io.roundingMode := in.rm
fma.io.detectTininess := hardfloat.consts.tininess_afterRounding
fma.io.a := in.in1
fma.io.b := in.in2
fma.io.c := in.in3
val res = Wire(new FPResult)
res.data := sanitizeNaN(fma.io.out, t)
res.exc := fma.io.exceptionFlags
io.out := Pipe(fma.io.validout, res, (latency-3) max 0)
}
class FPU(cfg: FPUParams)(implicit p: Parameters) extends FPUModule()(p) {
val io = IO(new FPUIO)
val (useClockGating, useDebugROB) = coreParams match {
case r: RocketCoreParams =>
val sz = if (r.debugROB.isDefined) r.debugROB.get.size else 1
(r.clockGate, sz < 1)
case _ => (false, false)
}
val clock_en_reg = Reg(Bool())
val clock_en = clock_en_reg || io.cp_req.valid
val gated_clock =
if (!useClockGating) clock
else ClockGate(clock, clock_en, "fpu_clock_gate")
val fp_decoder = Module(new FPUDecoder)
fp_decoder.io.inst := io.inst
val id_ctrl = WireInit(fp_decoder.io.sigs)
coreParams match { case r: RocketCoreParams => r.vector.map(v => {
val v_decode = v.decoder(p) // Only need to get ren1
v_decode.io.inst := io.inst
v_decode.io.vconfig := DontCare // core deals with this
when (v_decode.io.legal && v_decode.io.read_frs1) {
id_ctrl.ren1 := true.B
id_ctrl.swap12 := false.B
id_ctrl.toint := true.B
id_ctrl.typeTagIn := I
id_ctrl.typeTagOut := Mux(io.v_sew === 3.U, D, S)
}
when (v_decode.io.write_frd) { id_ctrl.wen := true.B }
})}
val ex_reg_valid = RegNext(io.valid, false.B)
val ex_reg_inst = RegEnable(io.inst, io.valid)
val ex_reg_ctrl = RegEnable(id_ctrl, io.valid)
val ex_ra = List.fill(3)(Reg(UInt()))
// load/vector response
val load_wb = RegNext(io.ll_resp_val)
val load_wb_typeTag = RegEnable(io.ll_resp_type(1,0) - typeTagWbOffset, io.ll_resp_val)
val load_wb_data = RegEnable(io.ll_resp_data, io.ll_resp_val)
val load_wb_tag = RegEnable(io.ll_resp_tag, io.ll_resp_val)
class FPUImpl { // entering gated-clock domain
val req_valid = ex_reg_valid || io.cp_req.valid
val ex_cp_valid = io.cp_req.fire
val mem_cp_valid = RegNext(ex_cp_valid, false.B)
val wb_cp_valid = RegNext(mem_cp_valid, false.B)
val mem_reg_valid = RegInit(false.B)
val killm = (io.killm || io.nack_mem) && !mem_cp_valid
// Kill X-stage instruction if M-stage is killed. This prevents it from
// speculatively being sent to the div-sqrt unit, which can cause priority
// inversion for two back-to-back divides, the first of which is killed.
val killx = io.killx || mem_reg_valid && killm
mem_reg_valid := ex_reg_valid && !killx || ex_cp_valid
val mem_reg_inst = RegEnable(ex_reg_inst, ex_reg_valid)
val wb_reg_valid = RegNext(mem_reg_valid && (!killm || mem_cp_valid), false.B)
val cp_ctrl = Wire(new FPUCtrlSigs)
cp_ctrl :<>= io.cp_req.bits.viewAsSupertype(new FPUCtrlSigs)
io.cp_resp.valid := false.B
io.cp_resp.bits.data := 0.U
io.cp_resp.bits.exc := DontCare
val ex_ctrl = Mux(ex_cp_valid, cp_ctrl, ex_reg_ctrl)
val mem_ctrl = RegEnable(ex_ctrl, req_valid)
val wb_ctrl = RegEnable(mem_ctrl, mem_reg_valid)
// CoreMonitorBundle to monitor fp register file writes
val frfWriteBundle = Seq.fill(2)(WireInit(new CoreMonitorBundle(xLen, fLen), DontCare))
frfWriteBundle.foreach { i =>
i.clock := clock
i.reset := reset
i.hartid := io.hartid
i.timer := io.time(31,0)
i.valid := false.B
i.wrenx := false.B
i.wrenf := false.B
i.excpt := false.B
}
// regfile
val regfile = Mem(32, Bits((fLen+1).W))
when (load_wb) {
val wdata = recode(load_wb_data, load_wb_typeTag)
regfile(load_wb_tag) := wdata
assert(consistent(wdata))
if (enableCommitLog)
printf("f%d p%d 0x%x\n", load_wb_tag, load_wb_tag + 32.U, ieee(wdata))
if (useDebugROB)
DebugROB.pushWb(clock, reset, io.hartid, load_wb, load_wb_tag + 32.U, ieee(wdata))
frfWriteBundle(0).wrdst := load_wb_tag
frfWriteBundle(0).wrenf := true.B
frfWriteBundle(0).wrdata := ieee(wdata)
}
val ex_rs = ex_ra.map(a => regfile(a))
when (io.valid) {
when (id_ctrl.ren1) {
when (!id_ctrl.swap12) { ex_ra(0) := io.inst(19,15) }
when (id_ctrl.swap12) { ex_ra(1) := io.inst(19,15) }
}
when (id_ctrl.ren2) {
when (id_ctrl.swap12) { ex_ra(0) := io.inst(24,20) }
when (id_ctrl.swap23) { ex_ra(2) := io.inst(24,20) }
when (!id_ctrl.swap12 && !id_ctrl.swap23) { ex_ra(1) := io.inst(24,20) }
}
when (id_ctrl.ren3) { ex_ra(2) := io.inst(31,27) }
}
val ex_rm = Mux(ex_reg_inst(14,12) === 7.U, io.fcsr_rm, ex_reg_inst(14,12))
def fuInput(minT: Option[FType]): FPInput = {
val req = Wire(new FPInput)
val tag = ex_ctrl.typeTagIn
req.viewAsSupertype(new Bundle with HasFPUCtrlSigs) :#= ex_ctrl.viewAsSupertype(new Bundle with HasFPUCtrlSigs)
req.rm := ex_rm
req.in1 := unbox(ex_rs(0), tag, minT)
req.in2 := unbox(ex_rs(1), tag, minT)
req.in3 := unbox(ex_rs(2), tag, minT)
req.typ := ex_reg_inst(21,20)
req.fmt := ex_reg_inst(26,25)
req.fmaCmd := ex_reg_inst(3,2) | (!ex_ctrl.ren3 && ex_reg_inst(27))
when (ex_cp_valid) {
req := io.cp_req.bits
when (io.cp_req.bits.swap12) {
req.in1 := io.cp_req.bits.in2
req.in2 := io.cp_req.bits.in1
}
when (io.cp_req.bits.swap23) {
req.in2 := io.cp_req.bits.in3
req.in3 := io.cp_req.bits.in2
}
}
req
}
val sfma = Module(new FPUFMAPipe(cfg.sfmaLatency, FType.S))
sfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === S
sfma.io.in.bits := fuInput(Some(sfma.t))
val fpiu = Module(new FPToInt)
fpiu.io.in.valid := req_valid && (ex_ctrl.toint || ex_ctrl.div || ex_ctrl.sqrt || (ex_ctrl.fastpipe && ex_ctrl.wflags))
fpiu.io.in.bits := fuInput(None)
io.store_data := fpiu.io.out.bits.store
io.toint_data := fpiu.io.out.bits.toint
when(fpiu.io.out.valid && mem_cp_valid && mem_ctrl.toint){
io.cp_resp.bits.data := fpiu.io.out.bits.toint
io.cp_resp.valid := true.B
}
val ifpu = Module(new IntToFP(cfg.ifpuLatency))
ifpu.io.in.valid := req_valid && ex_ctrl.fromint
ifpu.io.in.bits := fpiu.io.in.bits
ifpu.io.in.bits.in1 := Mux(ex_cp_valid, io.cp_req.bits.in1, io.fromint_data)
val fpmu = Module(new FPToFP(cfg.fpmuLatency))
fpmu.io.in.valid := req_valid && ex_ctrl.fastpipe
fpmu.io.in.bits := fpiu.io.in.bits
fpmu.io.lt := fpiu.io.out.bits.lt
val divSqrt_wen = WireDefault(false.B)
val divSqrt_inFlight = WireDefault(false.B)
val divSqrt_waddr = Reg(UInt(5.W))
val divSqrt_cp = Reg(Bool())
val divSqrt_typeTag = Wire(UInt(log2Up(floatTypes.size).W))
val divSqrt_wdata = Wire(UInt((fLen+1).W))
val divSqrt_flags = Wire(UInt(FPConstants.FLAGS_SZ.W))
divSqrt_typeTag := DontCare
divSqrt_wdata := DontCare
divSqrt_flags := DontCare
// writeback arbitration
case class Pipe(p: Module, lat: Int, cond: (FPUCtrlSigs) => Bool, res: FPResult)
val pipes = List(
Pipe(fpmu, fpmu.latency, (c: FPUCtrlSigs) => c.fastpipe, fpmu.io.out.bits),
Pipe(ifpu, ifpu.latency, (c: FPUCtrlSigs) => c.fromint, ifpu.io.out.bits),
Pipe(sfma, sfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === S, sfma.io.out.bits)) ++
(fLen > 32).option({
val dfma = Module(new FPUFMAPipe(cfg.dfmaLatency, FType.D))
dfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === D
dfma.io.in.bits := fuInput(Some(dfma.t))
Pipe(dfma, dfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === D, dfma.io.out.bits)
}) ++
(minFLen == 16).option({
val hfma = Module(new FPUFMAPipe(cfg.sfmaLatency, FType.H))
hfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === H
hfma.io.in.bits := fuInput(Some(hfma.t))
Pipe(hfma, hfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === H, hfma.io.out.bits)
})
def latencyMask(c: FPUCtrlSigs, offset: Int) = {
require(pipes.forall(_.lat >= offset))
pipes.map(p => Mux(p.cond(c), (1 << p.lat-offset).U, 0.U)).reduce(_|_)
}
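    // Worked example, assuming the default FPUParams latencies in this file
    // (sfmaLatency = 3, dfmaLatency = 4, fpmuLatency = 2, ifpuLatency = 2):
    // a double-precision FMA gives latencyMask(c, 0) = 1 << 4 = 0b10000, while
    // a fastpipe op such as fsgnj gives 1 << 2 = 0b100. The OR-reduce is safe
    // because at most one pipe condition matches any legal set of ctrl signals.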
def pipeid(c: FPUCtrlSigs) = pipes.zipWithIndex.map(p => Mux(p._1.cond(c), p._2.U, 0.U)).reduce(_|_)
val maxLatency = pipes.map(_.lat).max
val memLatencyMask = latencyMask(mem_ctrl, 2)
class WBInfo extends Bundle {
val rd = UInt(5.W)
val typeTag = UInt(log2Up(floatTypes.size).W)
val cp = Bool()
val pipeid = UInt(log2Ceil(pipes.size).W)
}
val wen = RegInit(0.U((maxLatency-1).W))
val wbInfo = Reg(Vec(maxLatency-1, new WBInfo))
val mem_wen = mem_reg_valid && (mem_ctrl.fma || mem_ctrl.fastpipe || mem_ctrl.fromint)
val write_port_busy = RegEnable(mem_wen && (memLatencyMask & latencyMask(ex_ctrl, 1)).orR || (wen & latencyMask(ex_ctrl, 0)).orR, req_valid)
ccover(mem_reg_valid && write_port_busy, "WB_STRUCTURAL", "structural hazard on writeback")
for (i <- 0 until maxLatency-2) {
when (wen(i+1)) { wbInfo(i) := wbInfo(i+1) }
}
wen := wen >> 1
when (mem_wen) {
when (!killm) {
wen := wen >> 1 | memLatencyMask
}
for (i <- 0 until maxLatency-1) {
when (!write_port_busy && memLatencyMask(i)) {
wbInfo(i).cp := mem_cp_valid
wbInfo(i).typeTag := mem_ctrl.typeTagOut
wbInfo(i).pipeid := pipeid(mem_ctrl)
wbInfo(i).rd := mem_reg_inst(11,7)
}
}
}
val waddr = Mux(divSqrt_wen, divSqrt_waddr, wbInfo(0).rd)
val wb_cp = Mux(divSqrt_wen, divSqrt_cp, wbInfo(0).cp)
val wtypeTag = Mux(divSqrt_wen, divSqrt_typeTag, wbInfo(0).typeTag)
val wdata = box(Mux(divSqrt_wen, divSqrt_wdata, (pipes.map(_.res.data): Seq[UInt])(wbInfo(0).pipeid)), wtypeTag)
val wexc = (pipes.map(_.res.exc): Seq[UInt])(wbInfo(0).pipeid)
when ((!wbInfo(0).cp && wen(0)) || divSqrt_wen) {
assert(consistent(wdata))
regfile(waddr) := wdata
if (enableCommitLog) {
printf("f%d p%d 0x%x\n", waddr, waddr + 32.U, ieee(wdata))
}
frfWriteBundle(1).wrdst := waddr
frfWriteBundle(1).wrenf := true.B
frfWriteBundle(1).wrdata := ieee(wdata)
}
if (useDebugROB) {
DebugROB.pushWb(clock, reset, io.hartid, (!wbInfo(0).cp && wen(0)) || divSqrt_wen, waddr + 32.U, ieee(wdata))
}
when (wb_cp && (wen(0) || divSqrt_wen)) {
io.cp_resp.bits.data := wdata
io.cp_resp.valid := true.B
}
assert(!io.cp_req.valid || pipes.forall(_.lat == pipes.head.lat).B,
s"FPU only supports coprocessor if FMA pipes have uniform latency ${pipes.map(_.lat)}")
// Avoid structural hazards and nacking of external requests
// toint responds in the MEM stage, so an incoming toint can induce a structural hazard against inflight FMAs
io.cp_req.ready := !ex_reg_valid && !(cp_ctrl.toint && wen =/= 0.U) && !divSqrt_inFlight
val wb_toint_valid = wb_reg_valid && wb_ctrl.toint
val wb_toint_exc = RegEnable(fpiu.io.out.bits.exc, mem_ctrl.toint)
io.fcsr_flags.valid := wb_toint_valid || divSqrt_wen || wen(0)
io.fcsr_flags.bits :=
Mux(wb_toint_valid, wb_toint_exc, 0.U) |
Mux(divSqrt_wen, divSqrt_flags, 0.U) |
Mux(wen(0), wexc, 0.U)
val divSqrt_write_port_busy = (mem_ctrl.div || mem_ctrl.sqrt) && wen.orR
io.fcsr_rdy := !(ex_reg_valid && ex_ctrl.wflags || mem_reg_valid && mem_ctrl.wflags || wb_reg_valid && wb_ctrl.toint || wen.orR || divSqrt_inFlight)
io.nack_mem := (write_port_busy || divSqrt_write_port_busy || divSqrt_inFlight) && !mem_cp_valid
io.dec <> id_ctrl
def useScoreboard(f: ((Pipe, Int)) => Bool) = pipes.zipWithIndex.filter(_._1.lat > 3).map(x => f(x)).fold(false.B)(_||_)
io.sboard_set := wb_reg_valid && !wb_cp_valid && RegNext(useScoreboard(_._1.cond(mem_ctrl)) || mem_ctrl.div || mem_ctrl.sqrt || mem_ctrl.vec)
io.sboard_clr := !wb_cp_valid && (divSqrt_wen || (wen(0) && useScoreboard(x => wbInfo(0).pipeid === x._2.U)))
io.sboard_clra := waddr
ccover(io.sboard_clr && load_wb, "DUAL_WRITEBACK", "load and FMA writeback on same cycle")
// we don't currently support round-max-magnitude (rm=4)
io.illegal_rm := io.inst(14,12).isOneOf(5.U, 6.U) || io.inst(14,12) === 7.U && io.fcsr_rm >= 5.U
if (cfg.divSqrt) {
val divSqrt_inValid = mem_reg_valid && (mem_ctrl.div || mem_ctrl.sqrt) && !divSqrt_inFlight
val divSqrt_killed = RegNext(divSqrt_inValid && killm, true.B)
when (divSqrt_inValid) {
divSqrt_waddr := mem_reg_inst(11,7)
divSqrt_cp := mem_cp_valid
}
ccover(divSqrt_inFlight && divSqrt_killed, "DIV_KILLED", "divide killed after issued to divider")
ccover(divSqrt_inFlight && mem_reg_valid && (mem_ctrl.div || mem_ctrl.sqrt), "DIV_BUSY", "divider structural hazard")
ccover(mem_reg_valid && divSqrt_write_port_busy, "DIV_WB_STRUCTURAL", "structural hazard on division writeback")
for (t <- floatTypes) {
val tag = mem_ctrl.typeTagOut
val divSqrt = withReset(divSqrt_killed) { Module(new hardfloat.DivSqrtRecFN_small(t.exp, t.sig, 0)) }
divSqrt.io.inValid := divSqrt_inValid && tag === typeTag(t).U
divSqrt.io.sqrtOp := mem_ctrl.sqrt
divSqrt.io.a := maxType.unsafeConvert(fpiu.io.out.bits.in.in1, t)
divSqrt.io.b := maxType.unsafeConvert(fpiu.io.out.bits.in.in2, t)
divSqrt.io.roundingMode := fpiu.io.out.bits.in.rm
divSqrt.io.detectTininess := hardfloat.consts.tininess_afterRounding
when (!divSqrt.io.inReady) { divSqrt_inFlight := true.B } // only 1 in flight
when (divSqrt.io.outValid_div || divSqrt.io.outValid_sqrt) {
divSqrt_wen := !divSqrt_killed
divSqrt_wdata := sanitizeNaN(divSqrt.io.out, t)
divSqrt_flags := divSqrt.io.exceptionFlags
divSqrt_typeTag := typeTag(t).U
}
}
when (divSqrt_killed) { divSqrt_inFlight := false.B }
} else {
when (id_ctrl.div || id_ctrl.sqrt) { io.illegal_rm := true.B }
}
// gate the clock
clock_en_reg := !useClockGating.B ||
io.keep_clock_enabled || // chicken bit
io.valid || // ID stage
req_valid || // EX stage
mem_reg_valid || mem_cp_valid || // MEM stage
wb_reg_valid || wb_cp_valid || // WB stage
wen.orR || divSqrt_inFlight || // post-WB stage
io.ll_resp_val // load writeback
} // leaving gated-clock domain
val fpuImpl = withClock (gated_clock) { new FPUImpl }
def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
property.cover(cond, s"FPU_$label", "Core;;" + desc)
}
File fNFromRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
object fNFromRecFN
{
def apply(expWidth: Int, sigWidth: Int, in: Bits) =
{
val minNormExp = (BigInt(1)<<(expWidth - 1)) + 2
val rawIn = rawFloatFromRecFN(expWidth, sigWidth, in)
val isSubnormal = rawIn.sExp < minNormExp.S
val denormShiftDist = 1.U - rawIn.sExp(log2Up(sigWidth - 1) - 1, 0)
val denormFract = ((rawIn.sig>>1)>>denormShiftDist)(sigWidth - 2, 0)
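        // e.g. for single precision (expWidth = 8, sigWidth = 24), minNormExp is
        // (1 << 7) + 2 = 130; recoded exponents below that denote values whose
        // IEEE encoding is subnormal, so the fraction is shifted right by
        // denormShiftDist and the exponent field is forced to zero below.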
val expOut =
Mux(isSubnormal,
0.U,
rawIn.sExp(expWidth - 1, 0) -
((BigInt(1)<<(expWidth - 1)) + 1).U
) | Fill(expWidth, rawIn.isNaN || rawIn.isInf)
val fractOut =
Mux(isSubnormal,
denormFract,
Mux(rawIn.isInf, 0.U, rawIn.sig(sigWidth - 2, 0))
)
Cat(rawIn.sign, expOut, fractOut)
}
}
File regfile.scala:
//******************************************************************************
// Copyright (c) 2013 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Register File (Abstract class and Synthesizable RegFile)
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v3.exu
import scala.collection.mutable.ArrayBuffer
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import boom.v3.common._
import boom.v3.util.{BoomCoreStringPrefix}
/**
* IO bundle for a register read port
*
* @param addrWidth size of register address in bits
* @param dataWidth size of register in bits
*/
class RegisterFileReadPortIO(val addrWidth: Int, val dataWidth: Int)(implicit p: Parameters) extends BoomBundle
{
val addr = Input(UInt(addrWidth.W))
val data = Output(UInt(dataWidth.W))
}
/**
* IO bundle for the register write port
*
* @param addrWidth size of register address in bits
* @param dataWidth size of register in bits
*/
class RegisterFileWritePort(val addrWidth: Int, val dataWidth: Int)(implicit p: Parameters) extends BoomBundle
{
val addr = UInt(addrWidth.W)
val data = UInt(dataWidth.W)
}
/**
* Utility function to turn ExeUnitResps to match the regfile's WritePort I/Os.
*/
object WritePort
{
def apply(enq: DecoupledIO[ExeUnitResp], addrWidth: Int, dataWidth: Int, rtype: UInt)
(implicit p: Parameters): Valid[RegisterFileWritePort] = {
val wport = Wire(Valid(new RegisterFileWritePort(addrWidth, dataWidth)))
wport.valid := enq.valid && enq.bits.uop.dst_rtype === rtype
wport.bits.addr := enq.bits.uop.pdst
wport.bits.data := enq.bits.data
enq.ready := true.B
wport
}
}
/**
* Register file abstract class
*
* @param numRegisters number of registers
* @param numReadPorts number of read ports
* @param numWritePorts number of write ports
* @param registerWidth size of registers in bits
* @param bypassableArray list of write ports from func units to the read port of the regfile
*/
abstract class RegisterFile(
numRegisters: Int,
numReadPorts: Int,
numWritePorts: Int,
registerWidth: Int,
bypassableArray: Seq[Boolean]) // which write ports can be bypassed to the read ports?
(implicit p: Parameters) extends BoomModule
{
val io = IO(new BoomBundle {
val read_ports = Vec(numReadPorts, new RegisterFileReadPortIO(maxPregSz, registerWidth))
val write_ports = Flipped(Vec(numWritePorts, Valid(new RegisterFileWritePort(maxPregSz, registerWidth))))
})
private val rf_cost = (numReadPorts + numWritePorts) * (numReadPorts + 2*numWritePorts)
private val type_str = if (registerWidth == fLen+1) "Floating Point" else "Integer"
override def toString: String = BoomCoreStringPrefix(
"==" + type_str + " Regfile==",
"Num RF Read Ports : " + numReadPorts,
"Num RF Write Ports : " + numWritePorts,
"RF Cost (R+W)*(R+2W) : " + rf_cost,
"Bypassable Units : " + bypassableArray)
}
/**
* A synthesizable model of a Register File. You will likely want to blackbox this for more than modest port counts.
*
* @param numRegisters number of registers
* @param numReadPorts number of read ports
* @param numWritePorts number of write ports
* @param registerWidth size of registers in bits
* @param bypassableArray list of write ports from func units to the read port of the regfile
*/
class RegisterFileSynthesizable(
numRegisters: Int,
numReadPorts: Int,
numWritePorts: Int,
registerWidth: Int,
bypassableArray: Seq[Boolean])
(implicit p: Parameters)
extends RegisterFile(numRegisters, numReadPorts, numWritePorts, registerWidth, bypassableArray)
{
// --------------------------------------------------------------
val regfile = Mem(numRegisters, UInt(registerWidth.W))
// --------------------------------------------------------------
// Read ports.
val read_data = Wire(Vec(numReadPorts, UInt(registerWidth.W)))
// Register the read port addresses to give a full cycle to the RegisterRead Stage (if desired).
val read_addrs = io.read_ports.map(p => RegNext(p.addr))
for (i <- 0 until numReadPorts) {
read_data(i) := regfile(read_addrs(i))
}
// --------------------------------------------------------------
// Bypass out of the ALU's write ports.
// We are assuming we cannot bypass a writer to a reader within the regfile memory
// for a write that occurs at the end of cycle S1 and a read that returns data on cycle S1.
// But since these bypasses are expensive, and not all write ports need to bypass their data,
// only perform the w->r bypass on a select number of write ports.
require (bypassableArray.length == io.write_ports.length)
if (bypassableArray.reduce(_||_)) {
val bypassable_wports = ArrayBuffer[Valid[RegisterFileWritePort]]()
io.write_ports zip bypassableArray map { case (wport, b) => if (b) { bypassable_wports += wport} }
for (i <- 0 until numReadPorts) {
val bypass_ens = bypassable_wports.map(x => x.valid &&
x.bits.addr === read_addrs(i))
val bypass_data = Mux1H(VecInit(bypass_ens.toSeq), VecInit(bypassable_wports.map(_.bits.data).toSeq))
io.read_ports(i).data := Mux(bypass_ens.reduce(_|_), bypass_data, read_data(i))
}
} else {
for (i <- 0 until numReadPorts) {
io.read_ports(i).data := read_data(i)
}
}
// --------------------------------------------------------------
// Write ports.
for (wport <- io.write_ports) {
when (wport.valid) {
regfile(wport.bits.addr) := wport.bits.data
}
}
// ensure there is only 1 writer per register (unless to preg0)
if (numWritePorts > 1) {
for (i <- 0 until (numWritePorts - 1)) {
for (j <- (i + 1) until numWritePorts) {
assert(!io.write_ports(i).valid ||
!io.write_ports(j).valid ||
(io.write_ports(i).bits.addr =/= io.write_ports(j).bits.addr) ||
(io.write_ports(i).bits.addr === 0.U), // note: you only have to check one here
"[regfile] too many writers a register")
}
}
}
}
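// --- Illustrative sketch added for exposition; not part of the original BOOM source. ---
// Example of putting the pieces above together. The port counts below are placeholders
// and numFpPhysRegs/fLen stand in for BOOM's configuration parameters; with 3 read and
// 2 write ports the reported cost metric is (3 + 2) * (3 + 2*2) = 35. A registerWidth of
// fLen + 1 matches the recoded floating-point storage format, and bypassing is left
// disabled since the write-to-read bypass is normally reserved for single-cycle ALU
// write ports.
object RegisterFileExample {
  def makeFpRegfile(numFpPhysRegs: Int, fLen: Int)(implicit p: Parameters) =
    Module(new RegisterFileSynthesizable(
      numRegisters    = numFpPhysRegs,
      numReadPorts    = 3,
      numWritePorts   = 2,
      registerWidth   = fLen + 1,
      bypassableArray = Seq.fill(2)(false)))
  // An execution unit's writeback could then feed a write port via the WritePort helper
  // above (names illustrative):
  //   fregfile.io.write_ports(0) := WritePort(exe_resp, maxPregSz, fLen + 1, RT_FLT)
}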
File execution-units.scala:
//******************************************************************************
// Copyright (c) 2015 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// RISC-V Constructing the Execution Units
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v3.exu
import scala.collection.mutable.{ArrayBuffer}
import chisel3._
import org.chipsalliance.cde.config.{Parameters}
import boom.v3.common._
import boom.v3.util.{BoomCoreStringPrefix}
/**
* Top level class to wrap all execution units together into a "collection"
*
* @param fpu using a FPU?
*/
class ExecutionUnits(val fpu: Boolean)(implicit val p: Parameters) extends HasBoomCoreParameters
{
val totalIssueWidth = issueParams.map(_.issueWidth).sum
//*******************************
// Instantiate the ExecutionUnits
private val exe_units = ArrayBuffer[ExecutionUnit]()
//*******************************
// Act like a collection
def length = exe_units.length
def apply(n: Int) = exe_units(n)
def map[T](f: ExecutionUnit => T) = {
exe_units.map(f)
}
def withFilter(f: ExecutionUnit => Boolean) = {
exe_units.withFilter(f)
}
def foreach[U](f: ExecutionUnit => U) = {
exe_units.foreach(f)
}
def zipWithIndex = {
exe_units.zipWithIndex
}
def indexWhere(f: ExecutionUnit => Boolean) = {
exe_units.indexWhere(f)
}
def count(f: ExecutionUnit => Boolean) = {
exe_units.count(f)
}
lazy val memory_units = {
exe_units.filter(_.hasMem)
}
lazy val alu_units = {
exe_units.filter(_.hasAlu)
}
lazy val csr_unit = {
require (exe_units.count(_.hasCSR) == 1)
exe_units.find(_.hasCSR).get
}
lazy val ifpu_unit = {
require (usingFPU)
require (exe_units.count(_.hasIfpu) == 1)
exe_units.find(_.hasIfpu).get
}
lazy val fpiu_unit = {
require (usingFPU)
require (exe_units.count(_.hasFpiu) == 1)
exe_units.find(_.hasFpiu).get
}
lazy val jmp_unit_idx = {
exe_units.indexWhere(_.hasJmpUnit)
}
lazy val rocc_unit = {
require (usingRoCC)
require (exe_units.count(_.hasRocc) == 1)
exe_units.find(_.hasRocc).get
}
if (!fpu) {
val int_width = issueParams.find(_.iqType == IQT_INT.litValue).get.issueWidth
for (w <- 0 until memWidth) {
val memExeUnit = Module(new ALUExeUnit(
hasAlu = false,
hasMem = true))
memExeUnit.io.ll_iresp.ready := DontCare
exe_units += memExeUnit
}
for (w <- 0 until int_width) {
def is_nth(n: Int): Boolean = w == ((n) % int_width)
val alu_exe_unit = Module(new ALUExeUnit(
hasJmpUnit = is_nth(0),
hasCSR = is_nth(1),
hasRocc = is_nth(1) && usingRoCC,
hasMul = is_nth(2),
hasDiv = is_nth(3),
hasIfpu = is_nth(4) && usingFPU))
exe_units += alu_exe_unit
}
} else {
val fp_width = issueParams.find(_.iqType == IQT_FP.litValue).get.issueWidth
for (w <- 0 until fp_width) {
val fpu_exe_unit = Module(new FPUExeUnit(hasFpu = true,
hasFdiv = usingFDivSqrt && (w==0),
hasFpiu = (w==0)))
exe_units += fpu_exe_unit
}
}
val exeUnitsStr = new StringBuilder
for (exe_unit <- exe_units) {
exeUnitsStr.append(exe_unit.toString)
}
override def toString: String =
(BoomCoreStringPrefix("===ExecutionUnits===") + "\n"
+ (if (!fpu) {
BoomCoreStringPrefix(
"==" + coreWidth + "-wide Machine==",
"==" + totalIssueWidth + " Issue==")
} else {
""
}) + "\n"
+ exeUnitsStr.toString)
require (exe_units.length != 0)
if (!fpu) {
// if this is for FPU units, we don't need a memory unit (or other integer units).
require (exe_units.map(_.hasMem).reduce(_|_), "Datapath is missing a memory unit.")
require (exe_units.map(_.hasMul).reduce(_|_), "Datapath is missing a multiplier.")
require (exe_units.map(_.hasDiv).reduce(_|_), "Datapath is missing a divider.")
} else {
require (exe_units.map(_.hasFpu).reduce(_|_),
"Datapath is missing a fpu (or has an fpu and shouldnt).")
}
val numIrfReaders = exe_units.count(_.readsIrf)
val numIrfReadPorts = exe_units.count(_.readsIrf) * 2 // two integer source operands per reading unit
val numIrfWritePorts = exe_units.count(_.writesIrf)
val numLlIrfWritePorts = exe_units.count(_.writesLlIrf)
val numTotalBypassPorts = exe_units.withFilter(_.bypassable).map(_.numBypassStages).foldLeft(0)(_+_)
val numFrfReaders = exe_units.count(_.readsFrf)
val numFrfReadPorts = exe_units.count(_.readsFrf) * 3 // three FP source operands per reading unit (FMA reads rs1, rs2, rs3)
val numFrfWritePorts = exe_units.count(_.writesFrf)
val numLlFrfWritePorts = exe_units.count(_.writesLlFrf)
// The mem-unit will also bypass writes to readers in the RRD stage.
// NOTE: This does NOT include the ll_wport
val bypassable_write_port_mask = exe_units.withFilter(x => x.writesIrf).map(u => u.bypassable)
}
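// --- Illustrative sketch added for exposition; not part of the original BOOM source. ---
// Worked example of the is_nth placement rule above: feature n is attached to ALU
// (n % int_width), so with an integer issue width of 2, ALU 0 receives the jump unit,
// multiplier, and int-to-FP unit (n = 0, 2, 4) while ALU 1 receives CSR/RoCC and the
// divider (n = 1, 3). A software mirror of that rule:
object ExecutionUnitsExample {
  def featurePlacement(intWidth: Int): Map[Int, Seq[String]] = {
    val features = Seq("JmpUnit", "CSR/RoCC", "Mul", "Div", "Ifpu")
    (0 until intWidth).map { w =>
      w -> features.zipWithIndex.collect { case (f, n) if w == n % intWidth => f }
    }.toMap
  }
  // featurePlacement(2) gives Map(0 -> Seq(JmpUnit, Mul, Ifpu), 1 -> Seq(CSR/RoCC, Div))
}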
File rawFloatFromFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
object rawFloatFromFN {
def apply(expWidth: Int, sigWidth: Int, in: Bits) = {
val sign = in(expWidth + sigWidth - 1)
val expIn = in(expWidth + sigWidth - 2, sigWidth - 1)
val fractIn = in(sigWidth - 2, 0)
val isZeroExpIn = (expIn === 0.U)
val isZeroFractIn = (fractIn === 0.U)
val normDist = countLeadingZeros(fractIn)
val subnormFract = (fractIn << normDist) (sigWidth - 3, 0) << 1
val adjustedExp =
Mux(isZeroExpIn,
normDist ^ ((BigInt(1) << (expWidth + 1)) - 1).U,
expIn
) + ((BigInt(1) << (expWidth - 1)).U
| Mux(isZeroExpIn, 2.U, 1.U))
val isZero = isZeroExpIn && isZeroFractIn
val isSpecial = adjustedExp(expWidth, expWidth - 1) === 3.U
val out = Wire(new RawFloat(expWidth, sigWidth))
out.isNaN := isSpecial && !isZeroFractIn
out.isInf := isSpecial && isZeroFractIn
out.isZero := isZero
out.sign := sign
out.sExp := adjustedExp(expWidth, 0).zext
out.sig :=
0.U(1.W) ## !isZero ## Mux(isZeroExpIn, subnormFract, fractIn)
out
}
}
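// --- Illustrative sketch added for exposition; not part of the original hardfloat sources. ---
// Software recomputation of the normal-number branch of adjustedExp above, for IEEE
// single precision 1.0f (expWidth = 8, sigWidth = 24): expIn = 127 and fractIn = 0,
// so adjustedExp = 127 + ((1 << 7) | 1) = 256, which is the signed exponent the raw
// format carries for 1.0. Subnormal inputs (expIn = 0) take the normDist branch and
// the +2 correction instead.
object RawFloatFromFNExample {
  val expWidth = 8
  val sigWidth = 24
  val bits    = BigInt("3f800000", 16)                                    // 1.0f
  val expIn   = (bits >> (sigWidth - 1)) & ((BigInt(1) << expWidth) - 1)  // 127
  val fractIn = bits & ((BigInt(1) << (sigWidth - 1)) - 1)                // 0
  val adjustedExp = expIn + ((BigInt(1) << (expWidth - 1)) | 1)           // 256
}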
File rawFloatFromRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
/*----------------------------------------------------------------------------
| In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be
| set.
*----------------------------------------------------------------------------*/
object rawFloatFromRecFN
{
def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat =
{
val exp = in(expWidth + sigWidth - 1, sigWidth - 1)
val isZero = exp(expWidth, expWidth - 2) === 0.U
val isSpecial = exp(expWidth, expWidth - 1) === 3.U
val out = Wire(new RawFloat(expWidth, sigWidth))
out.isNaN := isSpecial && exp(expWidth - 2)
out.isInf := isSpecial && ! exp(expWidth - 2)
out.isZero := isZero
out.sign := in(expWidth + sigWidth)
out.sExp := exp.zext
out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0)
out
}
}
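// --- Illustrative sketch added for exposition; not part of the original hardfloat sources. ---
// The classification above inspects only the top bits of the recoded exponent: a
// top-three-bit field of 000 means zero, a top-two-bit field of 11 marks a special
// value, and the next bit then separates NaN (1) from infinity (0). For example, the
// recoded exponent of 1.0f is 0x100, whose top three bits are 100, so it is an
// ordinary finite value. A software mirror of that decoding:
object RawFloatFromRecFNExample {
  def classify(exp: BigInt, expWidth: Int): String = {
    val top3 = (exp >> (expWidth - 2)) & 0x7
    if (top3 == 0) "zero"
    else if ((top3 >> 1) == 3 && (top3 & 1) == 1) "NaN"
    else if ((top3 >> 1) == 3) "infinity"
    else "finite nonzero"
  }
  // classify(BigInt(0x100), 8) == "finite nonzero"
}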
| module FpPipeline( // @[fp-pipeline.scala:28:7]
input clock, // @[fp-pipeline.scala:28:7]
input reset, // @[fp-pipeline.scala:28:7]
input [15:0] io_brupdate_b1_resolve_mask, // @[fp-pipeline.scala:36:14]
input [15:0] io_brupdate_b1_mispredict_mask, // @[fp-pipeline.scala:36:14]
input [6:0] io_brupdate_b2_uop_uopc, // @[fp-pipeline.scala:36:14]
input [31:0] io_brupdate_b2_uop_inst, // @[fp-pipeline.scala:36:14]
input [31:0] io_brupdate_b2_uop_debug_inst, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_is_rvc, // @[fp-pipeline.scala:36:14]
input [39:0] io_brupdate_b2_uop_debug_pc, // @[fp-pipeline.scala:36:14]
input [2:0] io_brupdate_b2_uop_iq_type, // @[fp-pipeline.scala:36:14]
input [9:0] io_brupdate_b2_uop_fu_code, // @[fp-pipeline.scala:36:14]
input [3:0] io_brupdate_b2_uop_ctrl_br_type, // @[fp-pipeline.scala:36:14]
input [1:0] io_brupdate_b2_uop_ctrl_op1_sel, // @[fp-pipeline.scala:36:14]
input [2:0] io_brupdate_b2_uop_ctrl_op2_sel, // @[fp-pipeline.scala:36:14]
input [2:0] io_brupdate_b2_uop_ctrl_imm_sel, // @[fp-pipeline.scala:36:14]
input [4:0] io_brupdate_b2_uop_ctrl_op_fcn, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_ctrl_fcn_dw, // @[fp-pipeline.scala:36:14]
input [2:0] io_brupdate_b2_uop_ctrl_csr_cmd, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_ctrl_is_load, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_ctrl_is_sta, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_ctrl_is_std, // @[fp-pipeline.scala:36:14]
input [1:0] io_brupdate_b2_uop_iw_state, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_iw_p1_poisoned, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_iw_p2_poisoned, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_is_br, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_is_jalr, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_is_jal, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_is_sfb, // @[fp-pipeline.scala:36:14]
input [15:0] io_brupdate_b2_uop_br_mask, // @[fp-pipeline.scala:36:14]
input [3:0] io_brupdate_b2_uop_br_tag, // @[fp-pipeline.scala:36:14]
input [4:0] io_brupdate_b2_uop_ftq_idx, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_edge_inst, // @[fp-pipeline.scala:36:14]
input [5:0] io_brupdate_b2_uop_pc_lob, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_taken, // @[fp-pipeline.scala:36:14]
input [19:0] io_brupdate_b2_uop_imm_packed, // @[fp-pipeline.scala:36:14]
input [11:0] io_brupdate_b2_uop_csr_addr, // @[fp-pipeline.scala:36:14]
input [6:0] io_brupdate_b2_uop_rob_idx, // @[fp-pipeline.scala:36:14]
input [4:0] io_brupdate_b2_uop_ldq_idx, // @[fp-pipeline.scala:36:14]
input [4:0] io_brupdate_b2_uop_stq_idx, // @[fp-pipeline.scala:36:14]
input [1:0] io_brupdate_b2_uop_rxq_idx, // @[fp-pipeline.scala:36:14]
input [6:0] io_brupdate_b2_uop_pdst, // @[fp-pipeline.scala:36:14]
input [6:0] io_brupdate_b2_uop_prs1, // @[fp-pipeline.scala:36:14]
input [6:0] io_brupdate_b2_uop_prs2, // @[fp-pipeline.scala:36:14]
input [6:0] io_brupdate_b2_uop_prs3, // @[fp-pipeline.scala:36:14]
input [4:0] io_brupdate_b2_uop_ppred, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_prs1_busy, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_prs2_busy, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_prs3_busy, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_ppred_busy, // @[fp-pipeline.scala:36:14]
input [6:0] io_brupdate_b2_uop_stale_pdst, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_exception, // @[fp-pipeline.scala:36:14]
input [63:0] io_brupdate_b2_uop_exc_cause, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_bypassable, // @[fp-pipeline.scala:36:14]
input [4:0] io_brupdate_b2_uop_mem_cmd, // @[fp-pipeline.scala:36:14]
input [1:0] io_brupdate_b2_uop_mem_size, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_mem_signed, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_is_fence, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_is_fencei, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_is_amo, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_uses_ldq, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_uses_stq, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_is_sys_pc2epc, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_is_unique, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_flush_on_commit, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_ldst_is_rs1, // @[fp-pipeline.scala:36:14]
input [5:0] io_brupdate_b2_uop_ldst, // @[fp-pipeline.scala:36:14]
input [5:0] io_brupdate_b2_uop_lrs1, // @[fp-pipeline.scala:36:14]
input [5:0] io_brupdate_b2_uop_lrs2, // @[fp-pipeline.scala:36:14]
input [5:0] io_brupdate_b2_uop_lrs3, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_ldst_val, // @[fp-pipeline.scala:36:14]
input [1:0] io_brupdate_b2_uop_dst_rtype, // @[fp-pipeline.scala:36:14]
input [1:0] io_brupdate_b2_uop_lrs1_rtype, // @[fp-pipeline.scala:36:14]
input [1:0] io_brupdate_b2_uop_lrs2_rtype, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_frs3_en, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_fp_val, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_fp_single, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_xcpt_pf_if, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_xcpt_ae_if, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_xcpt_ma_if, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_bp_debug_if, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_uop_bp_xcpt_if, // @[fp-pipeline.scala:36:14]
input [1:0] io_brupdate_b2_uop_debug_fsrc, // @[fp-pipeline.scala:36:14]
input [1:0] io_brupdate_b2_uop_debug_tsrc, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_valid, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_mispredict, // @[fp-pipeline.scala:36:14]
input io_brupdate_b2_taken, // @[fp-pipeline.scala:36:14]
input [2:0] io_brupdate_b2_cfi_type, // @[fp-pipeline.scala:36:14]
input [1:0] io_brupdate_b2_pc_sel, // @[fp-pipeline.scala:36:14]
input [39:0] io_brupdate_b2_jalr_target, // @[fp-pipeline.scala:36:14]
input [20:0] io_brupdate_b2_target_offset, // @[fp-pipeline.scala:36:14]
input io_flush_pipeline, // @[fp-pipeline.scala:36:14]
input [2:0] io_fcsr_rm, // @[fp-pipeline.scala:36:14]
input io_status_debug, // @[fp-pipeline.scala:36:14]
input io_status_cease, // @[fp-pipeline.scala:36:14]
input io_status_wfi, // @[fp-pipeline.scala:36:14]
input [1:0] io_status_dprv, // @[fp-pipeline.scala:36:14]
input io_status_dv, // @[fp-pipeline.scala:36:14]
input [1:0] io_status_prv, // @[fp-pipeline.scala:36:14]
input io_status_v, // @[fp-pipeline.scala:36:14]
input io_status_sd, // @[fp-pipeline.scala:36:14]
input io_status_mpv, // @[fp-pipeline.scala:36:14]
input io_status_gva, // @[fp-pipeline.scala:36:14]
input io_status_tsr, // @[fp-pipeline.scala:36:14]
input io_status_tw, // @[fp-pipeline.scala:36:14]
input io_status_tvm, // @[fp-pipeline.scala:36:14]
input io_status_mxr, // @[fp-pipeline.scala:36:14]
input io_status_sum, // @[fp-pipeline.scala:36:14]
input io_status_mprv, // @[fp-pipeline.scala:36:14]
input [1:0] io_status_fs, // @[fp-pipeline.scala:36:14]
input [1:0] io_status_mpp, // @[fp-pipeline.scala:36:14]
input io_status_spp, // @[fp-pipeline.scala:36:14]
input io_status_mpie, // @[fp-pipeline.scala:36:14]
input io_status_spie, // @[fp-pipeline.scala:36:14]
input io_status_mie, // @[fp-pipeline.scala:36:14]
input io_status_sie, // @[fp-pipeline.scala:36:14]
output io_dis_uops_0_ready, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_valid, // @[fp-pipeline.scala:36:14]
input [6:0] io_dis_uops_0_bits_uopc, // @[fp-pipeline.scala:36:14]
input [31:0] io_dis_uops_0_bits_inst, // @[fp-pipeline.scala:36:14]
input [31:0] io_dis_uops_0_bits_debug_inst, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_is_rvc, // @[fp-pipeline.scala:36:14]
input [39:0] io_dis_uops_0_bits_debug_pc, // @[fp-pipeline.scala:36:14]
input [2:0] io_dis_uops_0_bits_iq_type, // @[fp-pipeline.scala:36:14]
input [9:0] io_dis_uops_0_bits_fu_code, // @[fp-pipeline.scala:36:14]
input [3:0] io_dis_uops_0_bits_ctrl_br_type, // @[fp-pipeline.scala:36:14]
input [1:0] io_dis_uops_0_bits_ctrl_op1_sel, // @[fp-pipeline.scala:36:14]
input [2:0] io_dis_uops_0_bits_ctrl_op2_sel, // @[fp-pipeline.scala:36:14]
input [2:0] io_dis_uops_0_bits_ctrl_imm_sel, // @[fp-pipeline.scala:36:14]
input [4:0] io_dis_uops_0_bits_ctrl_op_fcn, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_ctrl_fcn_dw, // @[fp-pipeline.scala:36:14]
input [2:0] io_dis_uops_0_bits_ctrl_csr_cmd, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_ctrl_is_load, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_ctrl_is_sta, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_ctrl_is_std, // @[fp-pipeline.scala:36:14]
input [1:0] io_dis_uops_0_bits_iw_state, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_iw_p1_poisoned, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_iw_p2_poisoned, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_is_br, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_is_jalr, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_is_jal, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_is_sfb, // @[fp-pipeline.scala:36:14]
input [15:0] io_dis_uops_0_bits_br_mask, // @[fp-pipeline.scala:36:14]
input [3:0] io_dis_uops_0_bits_br_tag, // @[fp-pipeline.scala:36:14]
input [4:0] io_dis_uops_0_bits_ftq_idx, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_edge_inst, // @[fp-pipeline.scala:36:14]
input [5:0] io_dis_uops_0_bits_pc_lob, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_taken, // @[fp-pipeline.scala:36:14]
input [19:0] io_dis_uops_0_bits_imm_packed, // @[fp-pipeline.scala:36:14]
input [11:0] io_dis_uops_0_bits_csr_addr, // @[fp-pipeline.scala:36:14]
input [6:0] io_dis_uops_0_bits_rob_idx, // @[fp-pipeline.scala:36:14]
input [4:0] io_dis_uops_0_bits_ldq_idx, // @[fp-pipeline.scala:36:14]
input [4:0] io_dis_uops_0_bits_stq_idx, // @[fp-pipeline.scala:36:14]
input [1:0] io_dis_uops_0_bits_rxq_idx, // @[fp-pipeline.scala:36:14]
input [6:0] io_dis_uops_0_bits_pdst, // @[fp-pipeline.scala:36:14]
input [6:0] io_dis_uops_0_bits_prs1, // @[fp-pipeline.scala:36:14]
input [6:0] io_dis_uops_0_bits_prs2, // @[fp-pipeline.scala:36:14]
input [6:0] io_dis_uops_0_bits_prs3, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_prs1_busy, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_prs2_busy, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_prs3_busy, // @[fp-pipeline.scala:36:14]
input [6:0] io_dis_uops_0_bits_stale_pdst, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_exception, // @[fp-pipeline.scala:36:14]
input [63:0] io_dis_uops_0_bits_exc_cause, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_bypassable, // @[fp-pipeline.scala:36:14]
input [4:0] io_dis_uops_0_bits_mem_cmd, // @[fp-pipeline.scala:36:14]
input [1:0] io_dis_uops_0_bits_mem_size, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_mem_signed, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_is_fence, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_is_fencei, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_is_amo, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_uses_ldq, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_uses_stq, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_is_sys_pc2epc, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_is_unique, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_flush_on_commit, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_ldst_is_rs1, // @[fp-pipeline.scala:36:14]
input [5:0] io_dis_uops_0_bits_ldst, // @[fp-pipeline.scala:36:14]
input [5:0] io_dis_uops_0_bits_lrs1, // @[fp-pipeline.scala:36:14]
input [5:0] io_dis_uops_0_bits_lrs2, // @[fp-pipeline.scala:36:14]
input [5:0] io_dis_uops_0_bits_lrs3, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_ldst_val, // @[fp-pipeline.scala:36:14]
input [1:0] io_dis_uops_0_bits_dst_rtype, // @[fp-pipeline.scala:36:14]
input [1:0] io_dis_uops_0_bits_lrs1_rtype, // @[fp-pipeline.scala:36:14]
input [1:0] io_dis_uops_0_bits_lrs2_rtype, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_frs3_en, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_fp_val, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_fp_single, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_xcpt_pf_if, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_xcpt_ae_if, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_xcpt_ma_if, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_bp_debug_if, // @[fp-pipeline.scala:36:14]
input io_dis_uops_0_bits_bp_xcpt_if, // @[fp-pipeline.scala:36:14]
input [1:0] io_dis_uops_0_bits_debug_fsrc, // @[fp-pipeline.scala:36:14]
input [1:0] io_dis_uops_0_bits_debug_tsrc, // @[fp-pipeline.scala:36:14]
output io_dis_uops_1_ready, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_valid, // @[fp-pipeline.scala:36:14]
input [6:0] io_dis_uops_1_bits_uopc, // @[fp-pipeline.scala:36:14]
input [31:0] io_dis_uops_1_bits_inst, // @[fp-pipeline.scala:36:14]
input [31:0] io_dis_uops_1_bits_debug_inst, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_is_rvc, // @[fp-pipeline.scala:36:14]
input [39:0] io_dis_uops_1_bits_debug_pc, // @[fp-pipeline.scala:36:14]
input [2:0] io_dis_uops_1_bits_iq_type, // @[fp-pipeline.scala:36:14]
input [9:0] io_dis_uops_1_bits_fu_code, // @[fp-pipeline.scala:36:14]
input [3:0] io_dis_uops_1_bits_ctrl_br_type, // @[fp-pipeline.scala:36:14]
input [1:0] io_dis_uops_1_bits_ctrl_op1_sel, // @[fp-pipeline.scala:36:14]
input [2:0] io_dis_uops_1_bits_ctrl_op2_sel, // @[fp-pipeline.scala:36:14]
input [2:0] io_dis_uops_1_bits_ctrl_imm_sel, // @[fp-pipeline.scala:36:14]
input [4:0] io_dis_uops_1_bits_ctrl_op_fcn, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_ctrl_fcn_dw, // @[fp-pipeline.scala:36:14]
input [2:0] io_dis_uops_1_bits_ctrl_csr_cmd, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_ctrl_is_load, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_ctrl_is_sta, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_ctrl_is_std, // @[fp-pipeline.scala:36:14]
input [1:0] io_dis_uops_1_bits_iw_state, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_iw_p1_poisoned, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_iw_p2_poisoned, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_is_br, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_is_jalr, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_is_jal, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_is_sfb, // @[fp-pipeline.scala:36:14]
input [15:0] io_dis_uops_1_bits_br_mask, // @[fp-pipeline.scala:36:14]
input [3:0] io_dis_uops_1_bits_br_tag, // @[fp-pipeline.scala:36:14]
input [4:0] io_dis_uops_1_bits_ftq_idx, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_edge_inst, // @[fp-pipeline.scala:36:14]
input [5:0] io_dis_uops_1_bits_pc_lob, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_taken, // @[fp-pipeline.scala:36:14]
input [19:0] io_dis_uops_1_bits_imm_packed, // @[fp-pipeline.scala:36:14]
input [11:0] io_dis_uops_1_bits_csr_addr, // @[fp-pipeline.scala:36:14]
input [6:0] io_dis_uops_1_bits_rob_idx, // @[fp-pipeline.scala:36:14]
input [4:0] io_dis_uops_1_bits_ldq_idx, // @[fp-pipeline.scala:36:14]
input [4:0] io_dis_uops_1_bits_stq_idx, // @[fp-pipeline.scala:36:14]
input [1:0] io_dis_uops_1_bits_rxq_idx, // @[fp-pipeline.scala:36:14]
input [6:0] io_dis_uops_1_bits_pdst, // @[fp-pipeline.scala:36:14]
input [6:0] io_dis_uops_1_bits_prs1, // @[fp-pipeline.scala:36:14]
input [6:0] io_dis_uops_1_bits_prs2, // @[fp-pipeline.scala:36:14]
input [6:0] io_dis_uops_1_bits_prs3, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_prs1_busy, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_prs2_busy, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_prs3_busy, // @[fp-pipeline.scala:36:14]
input [6:0] io_dis_uops_1_bits_stale_pdst, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_exception, // @[fp-pipeline.scala:36:14]
input [63:0] io_dis_uops_1_bits_exc_cause, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_bypassable, // @[fp-pipeline.scala:36:14]
input [4:0] io_dis_uops_1_bits_mem_cmd, // @[fp-pipeline.scala:36:14]
input [1:0] io_dis_uops_1_bits_mem_size, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_mem_signed, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_is_fence, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_is_fencei, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_is_amo, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_uses_ldq, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_uses_stq, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_is_sys_pc2epc, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_is_unique, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_flush_on_commit, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_ldst_is_rs1, // @[fp-pipeline.scala:36:14]
input [5:0] io_dis_uops_1_bits_ldst, // @[fp-pipeline.scala:36:14]
input [5:0] io_dis_uops_1_bits_lrs1, // @[fp-pipeline.scala:36:14]
input [5:0] io_dis_uops_1_bits_lrs2, // @[fp-pipeline.scala:36:14]
input [5:0] io_dis_uops_1_bits_lrs3, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_ldst_val, // @[fp-pipeline.scala:36:14]
input [1:0] io_dis_uops_1_bits_dst_rtype, // @[fp-pipeline.scala:36:14]
input [1:0] io_dis_uops_1_bits_lrs1_rtype, // @[fp-pipeline.scala:36:14]
input [1:0] io_dis_uops_1_bits_lrs2_rtype, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_frs3_en, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_fp_val, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_fp_single, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_xcpt_pf_if, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_xcpt_ae_if, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_xcpt_ma_if, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_bp_debug_if, // @[fp-pipeline.scala:36:14]
input io_dis_uops_1_bits_bp_xcpt_if, // @[fp-pipeline.scala:36:14]
input [1:0] io_dis_uops_1_bits_debug_fsrc, // @[fp-pipeline.scala:36:14]
input [1:0] io_dis_uops_1_bits_debug_tsrc, // @[fp-pipeline.scala:36:14]
output io_dis_uops_2_ready, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_valid, // @[fp-pipeline.scala:36:14]
input [6:0] io_dis_uops_2_bits_uopc, // @[fp-pipeline.scala:36:14]
input [31:0] io_dis_uops_2_bits_inst, // @[fp-pipeline.scala:36:14]
input [31:0] io_dis_uops_2_bits_debug_inst, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_is_rvc, // @[fp-pipeline.scala:36:14]
input [39:0] io_dis_uops_2_bits_debug_pc, // @[fp-pipeline.scala:36:14]
input [2:0] io_dis_uops_2_bits_iq_type, // @[fp-pipeline.scala:36:14]
input [9:0] io_dis_uops_2_bits_fu_code, // @[fp-pipeline.scala:36:14]
input [3:0] io_dis_uops_2_bits_ctrl_br_type, // @[fp-pipeline.scala:36:14]
input [1:0] io_dis_uops_2_bits_ctrl_op1_sel, // @[fp-pipeline.scala:36:14]
input [2:0] io_dis_uops_2_bits_ctrl_op2_sel, // @[fp-pipeline.scala:36:14]
input [2:0] io_dis_uops_2_bits_ctrl_imm_sel, // @[fp-pipeline.scala:36:14]
input [4:0] io_dis_uops_2_bits_ctrl_op_fcn, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_ctrl_fcn_dw, // @[fp-pipeline.scala:36:14]
input [2:0] io_dis_uops_2_bits_ctrl_csr_cmd, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_ctrl_is_load, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_ctrl_is_sta, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_ctrl_is_std, // @[fp-pipeline.scala:36:14]
input [1:0] io_dis_uops_2_bits_iw_state, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_iw_p1_poisoned, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_iw_p2_poisoned, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_is_br, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_is_jalr, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_is_jal, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_is_sfb, // @[fp-pipeline.scala:36:14]
input [15:0] io_dis_uops_2_bits_br_mask, // @[fp-pipeline.scala:36:14]
input [3:0] io_dis_uops_2_bits_br_tag, // @[fp-pipeline.scala:36:14]
input [4:0] io_dis_uops_2_bits_ftq_idx, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_edge_inst, // @[fp-pipeline.scala:36:14]
input [5:0] io_dis_uops_2_bits_pc_lob, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_taken, // @[fp-pipeline.scala:36:14]
input [19:0] io_dis_uops_2_bits_imm_packed, // @[fp-pipeline.scala:36:14]
input [11:0] io_dis_uops_2_bits_csr_addr, // @[fp-pipeline.scala:36:14]
input [6:0] io_dis_uops_2_bits_rob_idx, // @[fp-pipeline.scala:36:14]
input [4:0] io_dis_uops_2_bits_ldq_idx, // @[fp-pipeline.scala:36:14]
input [4:0] io_dis_uops_2_bits_stq_idx, // @[fp-pipeline.scala:36:14]
input [1:0] io_dis_uops_2_bits_rxq_idx, // @[fp-pipeline.scala:36:14]
input [6:0] io_dis_uops_2_bits_pdst, // @[fp-pipeline.scala:36:14]
input [6:0] io_dis_uops_2_bits_prs1, // @[fp-pipeline.scala:36:14]
input [6:0] io_dis_uops_2_bits_prs2, // @[fp-pipeline.scala:36:14]
input [6:0] io_dis_uops_2_bits_prs3, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_prs1_busy, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_prs2_busy, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_prs3_busy, // @[fp-pipeline.scala:36:14]
input [6:0] io_dis_uops_2_bits_stale_pdst, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_exception, // @[fp-pipeline.scala:36:14]
input [63:0] io_dis_uops_2_bits_exc_cause, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_bypassable, // @[fp-pipeline.scala:36:14]
input [4:0] io_dis_uops_2_bits_mem_cmd, // @[fp-pipeline.scala:36:14]
input [1:0] io_dis_uops_2_bits_mem_size, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_mem_signed, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_is_fence, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_is_fencei, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_is_amo, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_uses_ldq, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_uses_stq, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_is_sys_pc2epc, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_is_unique, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_flush_on_commit, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_ldst_is_rs1, // @[fp-pipeline.scala:36:14]
input [5:0] io_dis_uops_2_bits_ldst, // @[fp-pipeline.scala:36:14]
input [5:0] io_dis_uops_2_bits_lrs1, // @[fp-pipeline.scala:36:14]
input [5:0] io_dis_uops_2_bits_lrs2, // @[fp-pipeline.scala:36:14]
input [5:0] io_dis_uops_2_bits_lrs3, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_ldst_val, // @[fp-pipeline.scala:36:14]
input [1:0] io_dis_uops_2_bits_dst_rtype, // @[fp-pipeline.scala:36:14]
input [1:0] io_dis_uops_2_bits_lrs1_rtype, // @[fp-pipeline.scala:36:14]
input [1:0] io_dis_uops_2_bits_lrs2_rtype, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_frs3_en, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_fp_val, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_fp_single, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_xcpt_pf_if, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_xcpt_ae_if, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_xcpt_ma_if, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_bp_debug_if, // @[fp-pipeline.scala:36:14]
input io_dis_uops_2_bits_bp_xcpt_if, // @[fp-pipeline.scala:36:14]
input [1:0] io_dis_uops_2_bits_debug_fsrc, // @[fp-pipeline.scala:36:14]
input [1:0] io_dis_uops_2_bits_debug_tsrc, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_valid, // @[fp-pipeline.scala:36:14]
input [6:0] io_ll_wports_0_bits_uop_uopc, // @[fp-pipeline.scala:36:14]
input [31:0] io_ll_wports_0_bits_uop_inst, // @[fp-pipeline.scala:36:14]
input [31:0] io_ll_wports_0_bits_uop_debug_inst, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_is_rvc, // @[fp-pipeline.scala:36:14]
input [39:0] io_ll_wports_0_bits_uop_debug_pc, // @[fp-pipeline.scala:36:14]
input [2:0] io_ll_wports_0_bits_uop_iq_type, // @[fp-pipeline.scala:36:14]
input [9:0] io_ll_wports_0_bits_uop_fu_code, // @[fp-pipeline.scala:36:14]
input [3:0] io_ll_wports_0_bits_uop_ctrl_br_type, // @[fp-pipeline.scala:36:14]
input [1:0] io_ll_wports_0_bits_uop_ctrl_op1_sel, // @[fp-pipeline.scala:36:14]
input [2:0] io_ll_wports_0_bits_uop_ctrl_op2_sel, // @[fp-pipeline.scala:36:14]
input [2:0] io_ll_wports_0_bits_uop_ctrl_imm_sel, // @[fp-pipeline.scala:36:14]
input [4:0] io_ll_wports_0_bits_uop_ctrl_op_fcn, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_ctrl_fcn_dw, // @[fp-pipeline.scala:36:14]
input [2:0] io_ll_wports_0_bits_uop_ctrl_csr_cmd, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_ctrl_is_load, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_ctrl_is_sta, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_ctrl_is_std, // @[fp-pipeline.scala:36:14]
input [1:0] io_ll_wports_0_bits_uop_iw_state, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_iw_p1_poisoned, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_iw_p2_poisoned, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_is_br, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_is_jalr, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_is_jal, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_is_sfb, // @[fp-pipeline.scala:36:14]
input [15:0] io_ll_wports_0_bits_uop_br_mask, // @[fp-pipeline.scala:36:14]
input [3:0] io_ll_wports_0_bits_uop_br_tag, // @[fp-pipeline.scala:36:14]
input [4:0] io_ll_wports_0_bits_uop_ftq_idx, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_edge_inst, // @[fp-pipeline.scala:36:14]
input [5:0] io_ll_wports_0_bits_uop_pc_lob, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_taken, // @[fp-pipeline.scala:36:14]
input [19:0] io_ll_wports_0_bits_uop_imm_packed, // @[fp-pipeline.scala:36:14]
input [11:0] io_ll_wports_0_bits_uop_csr_addr, // @[fp-pipeline.scala:36:14]
input [6:0] io_ll_wports_0_bits_uop_rob_idx, // @[fp-pipeline.scala:36:14]
input [4:0] io_ll_wports_0_bits_uop_ldq_idx, // @[fp-pipeline.scala:36:14]
input [4:0] io_ll_wports_0_bits_uop_stq_idx, // @[fp-pipeline.scala:36:14]
input [1:0] io_ll_wports_0_bits_uop_rxq_idx, // @[fp-pipeline.scala:36:14]
input [6:0] io_ll_wports_0_bits_uop_pdst, // @[fp-pipeline.scala:36:14]
input [6:0] io_ll_wports_0_bits_uop_prs1, // @[fp-pipeline.scala:36:14]
input [6:0] io_ll_wports_0_bits_uop_prs2, // @[fp-pipeline.scala:36:14]
input [6:0] io_ll_wports_0_bits_uop_prs3, // @[fp-pipeline.scala:36:14]
input [4:0] io_ll_wports_0_bits_uop_ppred, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_prs1_busy, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_prs2_busy, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_prs3_busy, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_ppred_busy, // @[fp-pipeline.scala:36:14]
input [6:0] io_ll_wports_0_bits_uop_stale_pdst, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_exception, // @[fp-pipeline.scala:36:14]
input [63:0] io_ll_wports_0_bits_uop_exc_cause, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_bypassable, // @[fp-pipeline.scala:36:14]
input [4:0] io_ll_wports_0_bits_uop_mem_cmd, // @[fp-pipeline.scala:36:14]
input [1:0] io_ll_wports_0_bits_uop_mem_size, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_mem_signed, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_is_fence, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_is_fencei, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_is_amo, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_uses_ldq, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_uses_stq, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_is_sys_pc2epc, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_is_unique, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_flush_on_commit, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_ldst_is_rs1, // @[fp-pipeline.scala:36:14]
input [5:0] io_ll_wports_0_bits_uop_ldst, // @[fp-pipeline.scala:36:14]
input [5:0] io_ll_wports_0_bits_uop_lrs1, // @[fp-pipeline.scala:36:14]
input [5:0] io_ll_wports_0_bits_uop_lrs2, // @[fp-pipeline.scala:36:14]
input [5:0] io_ll_wports_0_bits_uop_lrs3, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_ldst_val, // @[fp-pipeline.scala:36:14]
input [1:0] io_ll_wports_0_bits_uop_dst_rtype, // @[fp-pipeline.scala:36:14]
input [1:0] io_ll_wports_0_bits_uop_lrs1_rtype, // @[fp-pipeline.scala:36:14]
input [1:0] io_ll_wports_0_bits_uop_lrs2_rtype, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_frs3_en, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_fp_val, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_fp_single, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_xcpt_pf_if, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_xcpt_ae_if, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_xcpt_ma_if, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_bp_debug_if, // @[fp-pipeline.scala:36:14]
input io_ll_wports_0_bits_uop_bp_xcpt_if, // @[fp-pipeline.scala:36:14]
input [1:0] io_ll_wports_0_bits_uop_debug_fsrc, // @[fp-pipeline.scala:36:14]
input [1:0] io_ll_wports_0_bits_uop_debug_tsrc, // @[fp-pipeline.scala:36:14]
input [64:0] io_ll_wports_0_bits_data, // @[fp-pipeline.scala:36:14]
output io_from_int_ready, // @[fp-pipeline.scala:36:14]
input io_from_int_valid, // @[fp-pipeline.scala:36:14]
input [6:0] io_from_int_bits_uop_uopc, // @[fp-pipeline.scala:36:14]
input [31:0] io_from_int_bits_uop_inst, // @[fp-pipeline.scala:36:14]
input [31:0] io_from_int_bits_uop_debug_inst, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_is_rvc, // @[fp-pipeline.scala:36:14]
input [39:0] io_from_int_bits_uop_debug_pc, // @[fp-pipeline.scala:36:14]
input [2:0] io_from_int_bits_uop_iq_type, // @[fp-pipeline.scala:36:14]
input [9:0] io_from_int_bits_uop_fu_code, // @[fp-pipeline.scala:36:14]
input [3:0] io_from_int_bits_uop_ctrl_br_type, // @[fp-pipeline.scala:36:14]
input [1:0] io_from_int_bits_uop_ctrl_op1_sel, // @[fp-pipeline.scala:36:14]
input [2:0] io_from_int_bits_uop_ctrl_op2_sel, // @[fp-pipeline.scala:36:14]
input [2:0] io_from_int_bits_uop_ctrl_imm_sel, // @[fp-pipeline.scala:36:14]
input [4:0] io_from_int_bits_uop_ctrl_op_fcn, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_ctrl_fcn_dw, // @[fp-pipeline.scala:36:14]
input [2:0] io_from_int_bits_uop_ctrl_csr_cmd, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_ctrl_is_load, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_ctrl_is_sta, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_ctrl_is_std, // @[fp-pipeline.scala:36:14]
input [1:0] io_from_int_bits_uop_iw_state, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_iw_p1_poisoned, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_iw_p2_poisoned, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_is_br, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_is_jalr, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_is_jal, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_is_sfb, // @[fp-pipeline.scala:36:14]
input [15:0] io_from_int_bits_uop_br_mask, // @[fp-pipeline.scala:36:14]
input [3:0] io_from_int_bits_uop_br_tag, // @[fp-pipeline.scala:36:14]
input [4:0] io_from_int_bits_uop_ftq_idx, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_edge_inst, // @[fp-pipeline.scala:36:14]
input [5:0] io_from_int_bits_uop_pc_lob, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_taken, // @[fp-pipeline.scala:36:14]
input [19:0] io_from_int_bits_uop_imm_packed, // @[fp-pipeline.scala:36:14]
input [11:0] io_from_int_bits_uop_csr_addr, // @[fp-pipeline.scala:36:14]
input [6:0] io_from_int_bits_uop_rob_idx, // @[fp-pipeline.scala:36:14]
input [4:0] io_from_int_bits_uop_ldq_idx, // @[fp-pipeline.scala:36:14]
input [4:0] io_from_int_bits_uop_stq_idx, // @[fp-pipeline.scala:36:14]
input [1:0] io_from_int_bits_uop_rxq_idx, // @[fp-pipeline.scala:36:14]
input [6:0] io_from_int_bits_uop_pdst, // @[fp-pipeline.scala:36:14]
input [6:0] io_from_int_bits_uop_prs1, // @[fp-pipeline.scala:36:14]
input [6:0] io_from_int_bits_uop_prs2, // @[fp-pipeline.scala:36:14]
input [6:0] io_from_int_bits_uop_prs3, // @[fp-pipeline.scala:36:14]
input [4:0] io_from_int_bits_uop_ppred, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_prs1_busy, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_prs2_busy, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_prs3_busy, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_ppred_busy, // @[fp-pipeline.scala:36:14]
input [6:0] io_from_int_bits_uop_stale_pdst, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_exception, // @[fp-pipeline.scala:36:14]
input [63:0] io_from_int_bits_uop_exc_cause, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_bypassable, // @[fp-pipeline.scala:36:14]
input [4:0] io_from_int_bits_uop_mem_cmd, // @[fp-pipeline.scala:36:14]
input [1:0] io_from_int_bits_uop_mem_size, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_mem_signed, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_is_fence, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_is_fencei, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_is_amo, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_uses_ldq, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_uses_stq, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_is_sys_pc2epc, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_is_unique, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_flush_on_commit, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_ldst_is_rs1, // @[fp-pipeline.scala:36:14]
input [5:0] io_from_int_bits_uop_ldst, // @[fp-pipeline.scala:36:14]
input [5:0] io_from_int_bits_uop_lrs1, // @[fp-pipeline.scala:36:14]
input [5:0] io_from_int_bits_uop_lrs2, // @[fp-pipeline.scala:36:14]
input [5:0] io_from_int_bits_uop_lrs3, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_ldst_val, // @[fp-pipeline.scala:36:14]
input [1:0] io_from_int_bits_uop_dst_rtype, // @[fp-pipeline.scala:36:14]
input [1:0] io_from_int_bits_uop_lrs1_rtype, // @[fp-pipeline.scala:36:14]
input [1:0] io_from_int_bits_uop_lrs2_rtype, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_frs3_en, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_fp_val, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_fp_single, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_xcpt_pf_if, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_xcpt_ae_if, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_xcpt_ma_if, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_bp_debug_if, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_uop_bp_xcpt_if, // @[fp-pipeline.scala:36:14]
input [1:0] io_from_int_bits_uop_debug_fsrc, // @[fp-pipeline.scala:36:14]
input [1:0] io_from_int_bits_uop_debug_tsrc, // @[fp-pipeline.scala:36:14]
input [64:0] io_from_int_bits_data, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_predicated, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_valid, // @[fp-pipeline.scala:36:14]
input [6:0] io_from_int_bits_fflags_bits_uop_uopc, // @[fp-pipeline.scala:36:14]
input [31:0] io_from_int_bits_fflags_bits_uop_inst, // @[fp-pipeline.scala:36:14]
input [31:0] io_from_int_bits_fflags_bits_uop_debug_inst, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_is_rvc, // @[fp-pipeline.scala:36:14]
input [39:0] io_from_int_bits_fflags_bits_uop_debug_pc, // @[fp-pipeline.scala:36:14]
input [2:0] io_from_int_bits_fflags_bits_uop_iq_type, // @[fp-pipeline.scala:36:14]
input [9:0] io_from_int_bits_fflags_bits_uop_fu_code, // @[fp-pipeline.scala:36:14]
input [3:0] io_from_int_bits_fflags_bits_uop_ctrl_br_type, // @[fp-pipeline.scala:36:14]
input [1:0] io_from_int_bits_fflags_bits_uop_ctrl_op1_sel, // @[fp-pipeline.scala:36:14]
input [2:0] io_from_int_bits_fflags_bits_uop_ctrl_op2_sel, // @[fp-pipeline.scala:36:14]
input [2:0] io_from_int_bits_fflags_bits_uop_ctrl_imm_sel, // @[fp-pipeline.scala:36:14]
input [4:0] io_from_int_bits_fflags_bits_uop_ctrl_op_fcn, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_ctrl_fcn_dw, // @[fp-pipeline.scala:36:14]
input [2:0] io_from_int_bits_fflags_bits_uop_ctrl_csr_cmd, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_ctrl_is_load, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_ctrl_is_sta, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_ctrl_is_std, // @[fp-pipeline.scala:36:14]
input [1:0] io_from_int_bits_fflags_bits_uop_iw_state, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_iw_p1_poisoned, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_iw_p2_poisoned, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_is_br, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_is_jalr, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_is_jal, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_is_sfb, // @[fp-pipeline.scala:36:14]
input [15:0] io_from_int_bits_fflags_bits_uop_br_mask, // @[fp-pipeline.scala:36:14]
input [3:0] io_from_int_bits_fflags_bits_uop_br_tag, // @[fp-pipeline.scala:36:14]
input [4:0] io_from_int_bits_fflags_bits_uop_ftq_idx, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_edge_inst, // @[fp-pipeline.scala:36:14]
input [5:0] io_from_int_bits_fflags_bits_uop_pc_lob, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_taken, // @[fp-pipeline.scala:36:14]
input [19:0] io_from_int_bits_fflags_bits_uop_imm_packed, // @[fp-pipeline.scala:36:14]
input [11:0] io_from_int_bits_fflags_bits_uop_csr_addr, // @[fp-pipeline.scala:36:14]
input [6:0] io_from_int_bits_fflags_bits_uop_rob_idx, // @[fp-pipeline.scala:36:14]
input [4:0] io_from_int_bits_fflags_bits_uop_ldq_idx, // @[fp-pipeline.scala:36:14]
input [4:0] io_from_int_bits_fflags_bits_uop_stq_idx, // @[fp-pipeline.scala:36:14]
input [1:0] io_from_int_bits_fflags_bits_uop_rxq_idx, // @[fp-pipeline.scala:36:14]
input [6:0] io_from_int_bits_fflags_bits_uop_pdst, // @[fp-pipeline.scala:36:14]
input [6:0] io_from_int_bits_fflags_bits_uop_prs1, // @[fp-pipeline.scala:36:14]
input [6:0] io_from_int_bits_fflags_bits_uop_prs2, // @[fp-pipeline.scala:36:14]
input [6:0] io_from_int_bits_fflags_bits_uop_prs3, // @[fp-pipeline.scala:36:14]
input [4:0] io_from_int_bits_fflags_bits_uop_ppred, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_prs1_busy, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_prs2_busy, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_prs3_busy, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_ppred_busy, // @[fp-pipeline.scala:36:14]
input [6:0] io_from_int_bits_fflags_bits_uop_stale_pdst, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_exception, // @[fp-pipeline.scala:36:14]
input [63:0] io_from_int_bits_fflags_bits_uop_exc_cause, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_bypassable, // @[fp-pipeline.scala:36:14]
input [4:0] io_from_int_bits_fflags_bits_uop_mem_cmd, // @[fp-pipeline.scala:36:14]
input [1:0] io_from_int_bits_fflags_bits_uop_mem_size, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_mem_signed, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_is_fence, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_is_fencei, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_is_amo, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_uses_ldq, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_uses_stq, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_is_sys_pc2epc, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_is_unique, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_flush_on_commit, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_ldst_is_rs1, // @[fp-pipeline.scala:36:14]
input [5:0] io_from_int_bits_fflags_bits_uop_ldst, // @[fp-pipeline.scala:36:14]
input [5:0] io_from_int_bits_fflags_bits_uop_lrs1, // @[fp-pipeline.scala:36:14]
input [5:0] io_from_int_bits_fflags_bits_uop_lrs2, // @[fp-pipeline.scala:36:14]
input [5:0] io_from_int_bits_fflags_bits_uop_lrs3, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_ldst_val, // @[fp-pipeline.scala:36:14]
input [1:0] io_from_int_bits_fflags_bits_uop_dst_rtype, // @[fp-pipeline.scala:36:14]
input [1:0] io_from_int_bits_fflags_bits_uop_lrs1_rtype, // @[fp-pipeline.scala:36:14]
input [1:0] io_from_int_bits_fflags_bits_uop_lrs2_rtype, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_frs3_en, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_fp_val, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_fp_single, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_xcpt_pf_if, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_xcpt_ae_if, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_xcpt_ma_if, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_bp_debug_if, // @[fp-pipeline.scala:36:14]
input io_from_int_bits_fflags_bits_uop_bp_xcpt_if, // @[fp-pipeline.scala:36:14]
input [1:0] io_from_int_bits_fflags_bits_uop_debug_fsrc, // @[fp-pipeline.scala:36:14]
input [1:0] io_from_int_bits_fflags_bits_uop_debug_tsrc, // @[fp-pipeline.scala:36:14]
input [4:0] io_from_int_bits_fflags_bits_flags, // @[fp-pipeline.scala:36:14]
input io_to_sdq_ready, // @[fp-pipeline.scala:36:14]
output io_to_sdq_valid, // @[fp-pipeline.scala:36:14]
output [6:0] io_to_sdq_bits_uop_uopc, // @[fp-pipeline.scala:36:14]
output [31:0] io_to_sdq_bits_uop_inst, // @[fp-pipeline.scala:36:14]
output [31:0] io_to_sdq_bits_uop_debug_inst, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_is_rvc, // @[fp-pipeline.scala:36:14]
output [39:0] io_to_sdq_bits_uop_debug_pc, // @[fp-pipeline.scala:36:14]
output [2:0] io_to_sdq_bits_uop_iq_type, // @[fp-pipeline.scala:36:14]
output [9:0] io_to_sdq_bits_uop_fu_code, // @[fp-pipeline.scala:36:14]
output [3:0] io_to_sdq_bits_uop_ctrl_br_type, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_sdq_bits_uop_ctrl_op1_sel, // @[fp-pipeline.scala:36:14]
output [2:0] io_to_sdq_bits_uop_ctrl_op2_sel, // @[fp-pipeline.scala:36:14]
output [2:0] io_to_sdq_bits_uop_ctrl_imm_sel, // @[fp-pipeline.scala:36:14]
output [4:0] io_to_sdq_bits_uop_ctrl_op_fcn, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_ctrl_fcn_dw, // @[fp-pipeline.scala:36:14]
output [2:0] io_to_sdq_bits_uop_ctrl_csr_cmd, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_ctrl_is_load, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_ctrl_is_sta, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_ctrl_is_std, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_sdq_bits_uop_iw_state, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_iw_p1_poisoned, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_iw_p2_poisoned, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_is_br, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_is_jalr, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_is_jal, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_is_sfb, // @[fp-pipeline.scala:36:14]
output [15:0] io_to_sdq_bits_uop_br_mask, // @[fp-pipeline.scala:36:14]
output [3:0] io_to_sdq_bits_uop_br_tag, // @[fp-pipeline.scala:36:14]
output [4:0] io_to_sdq_bits_uop_ftq_idx, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_edge_inst, // @[fp-pipeline.scala:36:14]
output [5:0] io_to_sdq_bits_uop_pc_lob, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_taken, // @[fp-pipeline.scala:36:14]
output [19:0] io_to_sdq_bits_uop_imm_packed, // @[fp-pipeline.scala:36:14]
output [11:0] io_to_sdq_bits_uop_csr_addr, // @[fp-pipeline.scala:36:14]
output [6:0] io_to_sdq_bits_uop_rob_idx, // @[fp-pipeline.scala:36:14]
output [4:0] io_to_sdq_bits_uop_ldq_idx, // @[fp-pipeline.scala:36:14]
output [4:0] io_to_sdq_bits_uop_stq_idx, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_sdq_bits_uop_rxq_idx, // @[fp-pipeline.scala:36:14]
output [6:0] io_to_sdq_bits_uop_pdst, // @[fp-pipeline.scala:36:14]
output [6:0] io_to_sdq_bits_uop_prs1, // @[fp-pipeline.scala:36:14]
output [6:0] io_to_sdq_bits_uop_prs2, // @[fp-pipeline.scala:36:14]
output [6:0] io_to_sdq_bits_uop_prs3, // @[fp-pipeline.scala:36:14]
output [4:0] io_to_sdq_bits_uop_ppred, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_prs1_busy, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_prs2_busy, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_prs3_busy, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_ppred_busy, // @[fp-pipeline.scala:36:14]
output [6:0] io_to_sdq_bits_uop_stale_pdst, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_exception, // @[fp-pipeline.scala:36:14]
output [63:0] io_to_sdq_bits_uop_exc_cause, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_bypassable, // @[fp-pipeline.scala:36:14]
output [4:0] io_to_sdq_bits_uop_mem_cmd, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_sdq_bits_uop_mem_size, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_mem_signed, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_is_fence, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_is_fencei, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_is_amo, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_uses_ldq, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_uses_stq, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_is_sys_pc2epc, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_is_unique, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_flush_on_commit, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_ldst_is_rs1, // @[fp-pipeline.scala:36:14]
output [5:0] io_to_sdq_bits_uop_ldst, // @[fp-pipeline.scala:36:14]
output [5:0] io_to_sdq_bits_uop_lrs1, // @[fp-pipeline.scala:36:14]
output [5:0] io_to_sdq_bits_uop_lrs2, // @[fp-pipeline.scala:36:14]
output [5:0] io_to_sdq_bits_uop_lrs3, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_ldst_val, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_sdq_bits_uop_dst_rtype, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_sdq_bits_uop_lrs1_rtype, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_sdq_bits_uop_lrs2_rtype, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_frs3_en, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_fp_val, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_fp_single, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_xcpt_pf_if, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_xcpt_ae_if, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_xcpt_ma_if, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_bp_debug_if, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_uop_bp_xcpt_if, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_sdq_bits_uop_debug_fsrc, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_sdq_bits_uop_debug_tsrc, // @[fp-pipeline.scala:36:14]
output [63:0] io_to_sdq_bits_data, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_predicated, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_valid, // @[fp-pipeline.scala:36:14]
output [6:0] io_to_sdq_bits_fflags_bits_uop_uopc, // @[fp-pipeline.scala:36:14]
output [31:0] io_to_sdq_bits_fflags_bits_uop_inst, // @[fp-pipeline.scala:36:14]
output [31:0] io_to_sdq_bits_fflags_bits_uop_debug_inst, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_is_rvc, // @[fp-pipeline.scala:36:14]
output [39:0] io_to_sdq_bits_fflags_bits_uop_debug_pc, // @[fp-pipeline.scala:36:14]
output [2:0] io_to_sdq_bits_fflags_bits_uop_iq_type, // @[fp-pipeline.scala:36:14]
output [9:0] io_to_sdq_bits_fflags_bits_uop_fu_code, // @[fp-pipeline.scala:36:14]
output [3:0] io_to_sdq_bits_fflags_bits_uop_ctrl_br_type, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_sdq_bits_fflags_bits_uop_ctrl_op1_sel, // @[fp-pipeline.scala:36:14]
output [2:0] io_to_sdq_bits_fflags_bits_uop_ctrl_op2_sel, // @[fp-pipeline.scala:36:14]
output [2:0] io_to_sdq_bits_fflags_bits_uop_ctrl_imm_sel, // @[fp-pipeline.scala:36:14]
output [4:0] io_to_sdq_bits_fflags_bits_uop_ctrl_op_fcn, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_ctrl_fcn_dw, // @[fp-pipeline.scala:36:14]
output [2:0] io_to_sdq_bits_fflags_bits_uop_ctrl_csr_cmd, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_ctrl_is_load, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_ctrl_is_sta, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_ctrl_is_std, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_sdq_bits_fflags_bits_uop_iw_state, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_iw_p1_poisoned, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_iw_p2_poisoned, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_is_br, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_is_jalr, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_is_jal, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_is_sfb, // @[fp-pipeline.scala:36:14]
output [15:0] io_to_sdq_bits_fflags_bits_uop_br_mask, // @[fp-pipeline.scala:36:14]
output [3:0] io_to_sdq_bits_fflags_bits_uop_br_tag, // @[fp-pipeline.scala:36:14]
output [4:0] io_to_sdq_bits_fflags_bits_uop_ftq_idx, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_edge_inst, // @[fp-pipeline.scala:36:14]
output [5:0] io_to_sdq_bits_fflags_bits_uop_pc_lob, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_taken, // @[fp-pipeline.scala:36:14]
output [19:0] io_to_sdq_bits_fflags_bits_uop_imm_packed, // @[fp-pipeline.scala:36:14]
output [11:0] io_to_sdq_bits_fflags_bits_uop_csr_addr, // @[fp-pipeline.scala:36:14]
output [6:0] io_to_sdq_bits_fflags_bits_uop_rob_idx, // @[fp-pipeline.scala:36:14]
output [4:0] io_to_sdq_bits_fflags_bits_uop_ldq_idx, // @[fp-pipeline.scala:36:14]
output [4:0] io_to_sdq_bits_fflags_bits_uop_stq_idx, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_sdq_bits_fflags_bits_uop_rxq_idx, // @[fp-pipeline.scala:36:14]
output [6:0] io_to_sdq_bits_fflags_bits_uop_pdst, // @[fp-pipeline.scala:36:14]
output [6:0] io_to_sdq_bits_fflags_bits_uop_prs1, // @[fp-pipeline.scala:36:14]
output [6:0] io_to_sdq_bits_fflags_bits_uop_prs2, // @[fp-pipeline.scala:36:14]
output [6:0] io_to_sdq_bits_fflags_bits_uop_prs3, // @[fp-pipeline.scala:36:14]
output [4:0] io_to_sdq_bits_fflags_bits_uop_ppred, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_prs1_busy, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_prs2_busy, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_prs3_busy, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_ppred_busy, // @[fp-pipeline.scala:36:14]
output [6:0] io_to_sdq_bits_fflags_bits_uop_stale_pdst, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_exception, // @[fp-pipeline.scala:36:14]
output [63:0] io_to_sdq_bits_fflags_bits_uop_exc_cause, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_bypassable, // @[fp-pipeline.scala:36:14]
output [4:0] io_to_sdq_bits_fflags_bits_uop_mem_cmd, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_sdq_bits_fflags_bits_uop_mem_size, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_mem_signed, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_is_fence, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_is_fencei, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_is_amo, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_uses_ldq, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_uses_stq, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_is_sys_pc2epc, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_is_unique, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_flush_on_commit, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_ldst_is_rs1, // @[fp-pipeline.scala:36:14]
output [5:0] io_to_sdq_bits_fflags_bits_uop_ldst, // @[fp-pipeline.scala:36:14]
output [5:0] io_to_sdq_bits_fflags_bits_uop_lrs1, // @[fp-pipeline.scala:36:14]
output [5:0] io_to_sdq_bits_fflags_bits_uop_lrs2, // @[fp-pipeline.scala:36:14]
output [5:0] io_to_sdq_bits_fflags_bits_uop_lrs3, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_ldst_val, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_sdq_bits_fflags_bits_uop_dst_rtype, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_sdq_bits_fflags_bits_uop_lrs1_rtype, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_sdq_bits_fflags_bits_uop_lrs2_rtype, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_frs3_en, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_fp_val, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_fp_single, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_xcpt_pf_if, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_xcpt_ae_if, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_xcpt_ma_if, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_bp_debug_if, // @[fp-pipeline.scala:36:14]
output io_to_sdq_bits_fflags_bits_uop_bp_xcpt_if, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_sdq_bits_fflags_bits_uop_debug_fsrc, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_sdq_bits_fflags_bits_uop_debug_tsrc, // @[fp-pipeline.scala:36:14]
output [4:0] io_to_sdq_bits_fflags_bits_flags, // @[fp-pipeline.scala:36:14]
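  // io_to_int: Decoupled interface for FP results handed to the integer pipeline (e.g. FP-to-int moves/converts), with data and fflags.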
input io_to_int_ready, // @[fp-pipeline.scala:36:14]
output io_to_int_valid, // @[fp-pipeline.scala:36:14]
output [6:0] io_to_int_bits_uop_uopc, // @[fp-pipeline.scala:36:14]
output [31:0] io_to_int_bits_uop_inst, // @[fp-pipeline.scala:36:14]
output [31:0] io_to_int_bits_uop_debug_inst, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_is_rvc, // @[fp-pipeline.scala:36:14]
output [39:0] io_to_int_bits_uop_debug_pc, // @[fp-pipeline.scala:36:14]
output [2:0] io_to_int_bits_uop_iq_type, // @[fp-pipeline.scala:36:14]
output [9:0] io_to_int_bits_uop_fu_code, // @[fp-pipeline.scala:36:14]
output [3:0] io_to_int_bits_uop_ctrl_br_type, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_int_bits_uop_ctrl_op1_sel, // @[fp-pipeline.scala:36:14]
output [2:0] io_to_int_bits_uop_ctrl_op2_sel, // @[fp-pipeline.scala:36:14]
output [2:0] io_to_int_bits_uop_ctrl_imm_sel, // @[fp-pipeline.scala:36:14]
output [4:0] io_to_int_bits_uop_ctrl_op_fcn, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_ctrl_fcn_dw, // @[fp-pipeline.scala:36:14]
output [2:0] io_to_int_bits_uop_ctrl_csr_cmd, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_ctrl_is_load, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_ctrl_is_sta, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_ctrl_is_std, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_int_bits_uop_iw_state, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_iw_p1_poisoned, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_iw_p2_poisoned, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_is_br, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_is_jalr, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_is_jal, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_is_sfb, // @[fp-pipeline.scala:36:14]
output [15:0] io_to_int_bits_uop_br_mask, // @[fp-pipeline.scala:36:14]
output [3:0] io_to_int_bits_uop_br_tag, // @[fp-pipeline.scala:36:14]
output [4:0] io_to_int_bits_uop_ftq_idx, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_edge_inst, // @[fp-pipeline.scala:36:14]
output [5:0] io_to_int_bits_uop_pc_lob, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_taken, // @[fp-pipeline.scala:36:14]
output [19:0] io_to_int_bits_uop_imm_packed, // @[fp-pipeline.scala:36:14]
output [11:0] io_to_int_bits_uop_csr_addr, // @[fp-pipeline.scala:36:14]
output [6:0] io_to_int_bits_uop_rob_idx, // @[fp-pipeline.scala:36:14]
output [4:0] io_to_int_bits_uop_ldq_idx, // @[fp-pipeline.scala:36:14]
output [4:0] io_to_int_bits_uop_stq_idx, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_int_bits_uop_rxq_idx, // @[fp-pipeline.scala:36:14]
output [6:0] io_to_int_bits_uop_pdst, // @[fp-pipeline.scala:36:14]
output [6:0] io_to_int_bits_uop_prs1, // @[fp-pipeline.scala:36:14]
output [6:0] io_to_int_bits_uop_prs2, // @[fp-pipeline.scala:36:14]
output [6:0] io_to_int_bits_uop_prs3, // @[fp-pipeline.scala:36:14]
output [4:0] io_to_int_bits_uop_ppred, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_prs1_busy, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_prs2_busy, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_prs3_busy, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_ppred_busy, // @[fp-pipeline.scala:36:14]
output [6:0] io_to_int_bits_uop_stale_pdst, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_exception, // @[fp-pipeline.scala:36:14]
output [63:0] io_to_int_bits_uop_exc_cause, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_bypassable, // @[fp-pipeline.scala:36:14]
output [4:0] io_to_int_bits_uop_mem_cmd, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_int_bits_uop_mem_size, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_mem_signed, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_is_fence, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_is_fencei, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_is_amo, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_uses_ldq, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_uses_stq, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_is_sys_pc2epc, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_is_unique, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_flush_on_commit, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_ldst_is_rs1, // @[fp-pipeline.scala:36:14]
output [5:0] io_to_int_bits_uop_ldst, // @[fp-pipeline.scala:36:14]
output [5:0] io_to_int_bits_uop_lrs1, // @[fp-pipeline.scala:36:14]
output [5:0] io_to_int_bits_uop_lrs2, // @[fp-pipeline.scala:36:14]
output [5:0] io_to_int_bits_uop_lrs3, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_ldst_val, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_int_bits_uop_dst_rtype, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_int_bits_uop_lrs1_rtype, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_int_bits_uop_lrs2_rtype, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_frs3_en, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_fp_val, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_fp_single, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_xcpt_pf_if, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_xcpt_ae_if, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_xcpt_ma_if, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_bp_debug_if, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_uop_bp_xcpt_if, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_int_bits_uop_debug_fsrc, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_int_bits_uop_debug_tsrc, // @[fp-pipeline.scala:36:14]
output [63:0] io_to_int_bits_data, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_predicated, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_valid, // @[fp-pipeline.scala:36:14]
output [6:0] io_to_int_bits_fflags_bits_uop_uopc, // @[fp-pipeline.scala:36:14]
output [31:0] io_to_int_bits_fflags_bits_uop_inst, // @[fp-pipeline.scala:36:14]
output [31:0] io_to_int_bits_fflags_bits_uop_debug_inst, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_is_rvc, // @[fp-pipeline.scala:36:14]
output [39:0] io_to_int_bits_fflags_bits_uop_debug_pc, // @[fp-pipeline.scala:36:14]
output [2:0] io_to_int_bits_fflags_bits_uop_iq_type, // @[fp-pipeline.scala:36:14]
output [9:0] io_to_int_bits_fflags_bits_uop_fu_code, // @[fp-pipeline.scala:36:14]
output [3:0] io_to_int_bits_fflags_bits_uop_ctrl_br_type, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_int_bits_fflags_bits_uop_ctrl_op1_sel, // @[fp-pipeline.scala:36:14]
output [2:0] io_to_int_bits_fflags_bits_uop_ctrl_op2_sel, // @[fp-pipeline.scala:36:14]
output [2:0] io_to_int_bits_fflags_bits_uop_ctrl_imm_sel, // @[fp-pipeline.scala:36:14]
output [4:0] io_to_int_bits_fflags_bits_uop_ctrl_op_fcn, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_ctrl_fcn_dw, // @[fp-pipeline.scala:36:14]
output [2:0] io_to_int_bits_fflags_bits_uop_ctrl_csr_cmd, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_ctrl_is_load, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_ctrl_is_sta, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_ctrl_is_std, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_int_bits_fflags_bits_uop_iw_state, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_iw_p1_poisoned, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_iw_p2_poisoned, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_is_br, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_is_jalr, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_is_jal, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_is_sfb, // @[fp-pipeline.scala:36:14]
output [15:0] io_to_int_bits_fflags_bits_uop_br_mask, // @[fp-pipeline.scala:36:14]
output [3:0] io_to_int_bits_fflags_bits_uop_br_tag, // @[fp-pipeline.scala:36:14]
output [4:0] io_to_int_bits_fflags_bits_uop_ftq_idx, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_edge_inst, // @[fp-pipeline.scala:36:14]
output [5:0] io_to_int_bits_fflags_bits_uop_pc_lob, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_taken, // @[fp-pipeline.scala:36:14]
output [19:0] io_to_int_bits_fflags_bits_uop_imm_packed, // @[fp-pipeline.scala:36:14]
output [11:0] io_to_int_bits_fflags_bits_uop_csr_addr, // @[fp-pipeline.scala:36:14]
output [6:0] io_to_int_bits_fflags_bits_uop_rob_idx, // @[fp-pipeline.scala:36:14]
output [4:0] io_to_int_bits_fflags_bits_uop_ldq_idx, // @[fp-pipeline.scala:36:14]
output [4:0] io_to_int_bits_fflags_bits_uop_stq_idx, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_int_bits_fflags_bits_uop_rxq_idx, // @[fp-pipeline.scala:36:14]
output [6:0] io_to_int_bits_fflags_bits_uop_pdst, // @[fp-pipeline.scala:36:14]
output [6:0] io_to_int_bits_fflags_bits_uop_prs1, // @[fp-pipeline.scala:36:14]
output [6:0] io_to_int_bits_fflags_bits_uop_prs2, // @[fp-pipeline.scala:36:14]
output [6:0] io_to_int_bits_fflags_bits_uop_prs3, // @[fp-pipeline.scala:36:14]
output [4:0] io_to_int_bits_fflags_bits_uop_ppred, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_prs1_busy, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_prs2_busy, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_prs3_busy, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_ppred_busy, // @[fp-pipeline.scala:36:14]
output [6:0] io_to_int_bits_fflags_bits_uop_stale_pdst, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_exception, // @[fp-pipeline.scala:36:14]
output [63:0] io_to_int_bits_fflags_bits_uop_exc_cause, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_bypassable, // @[fp-pipeline.scala:36:14]
output [4:0] io_to_int_bits_fflags_bits_uop_mem_cmd, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_int_bits_fflags_bits_uop_mem_size, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_mem_signed, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_is_fence, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_is_fencei, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_is_amo, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_uses_ldq, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_uses_stq, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_is_sys_pc2epc, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_is_unique, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_flush_on_commit, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_ldst_is_rs1, // @[fp-pipeline.scala:36:14]
output [5:0] io_to_int_bits_fflags_bits_uop_ldst, // @[fp-pipeline.scala:36:14]
output [5:0] io_to_int_bits_fflags_bits_uop_lrs1, // @[fp-pipeline.scala:36:14]
output [5:0] io_to_int_bits_fflags_bits_uop_lrs2, // @[fp-pipeline.scala:36:14]
output [5:0] io_to_int_bits_fflags_bits_uop_lrs3, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_ldst_val, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_int_bits_fflags_bits_uop_dst_rtype, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_int_bits_fflags_bits_uop_lrs1_rtype, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_int_bits_fflags_bits_uop_lrs2_rtype, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_frs3_en, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_fp_val, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_fp_single, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_xcpt_pf_if, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_xcpt_ae_if, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_xcpt_ma_if, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_bp_debug_if, // @[fp-pipeline.scala:36:14]
output io_to_int_bits_fflags_bits_uop_bp_xcpt_if, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_int_bits_fflags_bits_uop_debug_fsrc, // @[fp-pipeline.scala:36:14]
output [1:0] io_to_int_bits_fflags_bits_uop_debug_tsrc, // @[fp-pipeline.scala:36:14]
output [4:0] io_to_int_bits_fflags_bits_flags, // @[fp-pipeline.scala:36:14]
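  // io_wakeups_0: first FP writeback/wakeup port (uop, 65-bit recoded data, predication, and fflags).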
output io_wakeups_0_valid, // @[fp-pipeline.scala:36:14]
output [6:0] io_wakeups_0_bits_uop_uopc, // @[fp-pipeline.scala:36:14]
output [31:0] io_wakeups_0_bits_uop_inst, // @[fp-pipeline.scala:36:14]
output [31:0] io_wakeups_0_bits_uop_debug_inst, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_is_rvc, // @[fp-pipeline.scala:36:14]
output [39:0] io_wakeups_0_bits_uop_debug_pc, // @[fp-pipeline.scala:36:14]
output [2:0] io_wakeups_0_bits_uop_iq_type, // @[fp-pipeline.scala:36:14]
output [9:0] io_wakeups_0_bits_uop_fu_code, // @[fp-pipeline.scala:36:14]
output [3:0] io_wakeups_0_bits_uop_ctrl_br_type, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_0_bits_uop_ctrl_op1_sel, // @[fp-pipeline.scala:36:14]
output [2:0] io_wakeups_0_bits_uop_ctrl_op2_sel, // @[fp-pipeline.scala:36:14]
output [2:0] io_wakeups_0_bits_uop_ctrl_imm_sel, // @[fp-pipeline.scala:36:14]
output [4:0] io_wakeups_0_bits_uop_ctrl_op_fcn, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_ctrl_fcn_dw, // @[fp-pipeline.scala:36:14]
output [2:0] io_wakeups_0_bits_uop_ctrl_csr_cmd, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_ctrl_is_load, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_ctrl_is_sta, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_ctrl_is_std, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_0_bits_uop_iw_state, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_iw_p1_poisoned, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_iw_p2_poisoned, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_is_br, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_is_jalr, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_is_jal, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_is_sfb, // @[fp-pipeline.scala:36:14]
output [15:0] io_wakeups_0_bits_uop_br_mask, // @[fp-pipeline.scala:36:14]
output [3:0] io_wakeups_0_bits_uop_br_tag, // @[fp-pipeline.scala:36:14]
output [4:0] io_wakeups_0_bits_uop_ftq_idx, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_edge_inst, // @[fp-pipeline.scala:36:14]
output [5:0] io_wakeups_0_bits_uop_pc_lob, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_taken, // @[fp-pipeline.scala:36:14]
output [19:0] io_wakeups_0_bits_uop_imm_packed, // @[fp-pipeline.scala:36:14]
output [11:0] io_wakeups_0_bits_uop_csr_addr, // @[fp-pipeline.scala:36:14]
output [6:0] io_wakeups_0_bits_uop_rob_idx, // @[fp-pipeline.scala:36:14]
output [4:0] io_wakeups_0_bits_uop_ldq_idx, // @[fp-pipeline.scala:36:14]
output [4:0] io_wakeups_0_bits_uop_stq_idx, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_0_bits_uop_rxq_idx, // @[fp-pipeline.scala:36:14]
output [6:0] io_wakeups_0_bits_uop_pdst, // @[fp-pipeline.scala:36:14]
output [6:0] io_wakeups_0_bits_uop_prs1, // @[fp-pipeline.scala:36:14]
output [6:0] io_wakeups_0_bits_uop_prs2, // @[fp-pipeline.scala:36:14]
output [6:0] io_wakeups_0_bits_uop_prs3, // @[fp-pipeline.scala:36:14]
output [4:0] io_wakeups_0_bits_uop_ppred, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_prs1_busy, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_prs2_busy, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_prs3_busy, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_ppred_busy, // @[fp-pipeline.scala:36:14]
output [6:0] io_wakeups_0_bits_uop_stale_pdst, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_exception, // @[fp-pipeline.scala:36:14]
output [63:0] io_wakeups_0_bits_uop_exc_cause, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_bypassable, // @[fp-pipeline.scala:36:14]
output [4:0] io_wakeups_0_bits_uop_mem_cmd, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_0_bits_uop_mem_size, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_mem_signed, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_is_fence, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_is_fencei, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_is_amo, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_uses_ldq, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_uses_stq, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_is_sys_pc2epc, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_is_unique, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_flush_on_commit, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_ldst_is_rs1, // @[fp-pipeline.scala:36:14]
output [5:0] io_wakeups_0_bits_uop_ldst, // @[fp-pipeline.scala:36:14]
output [5:0] io_wakeups_0_bits_uop_lrs1, // @[fp-pipeline.scala:36:14]
output [5:0] io_wakeups_0_bits_uop_lrs2, // @[fp-pipeline.scala:36:14]
output [5:0] io_wakeups_0_bits_uop_lrs3, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_ldst_val, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_0_bits_uop_dst_rtype, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_0_bits_uop_lrs1_rtype, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_0_bits_uop_lrs2_rtype, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_frs3_en, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_fp_val, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_fp_single, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_xcpt_pf_if, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_xcpt_ae_if, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_xcpt_ma_if, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_bp_debug_if, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_uop_bp_xcpt_if, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_0_bits_uop_debug_fsrc, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_0_bits_uop_debug_tsrc, // @[fp-pipeline.scala:36:14]
output [64:0] io_wakeups_0_bits_data, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_predicated, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_valid, // @[fp-pipeline.scala:36:14]
output [6:0] io_wakeups_0_bits_fflags_bits_uop_uopc, // @[fp-pipeline.scala:36:14]
output [31:0] io_wakeups_0_bits_fflags_bits_uop_inst, // @[fp-pipeline.scala:36:14]
output [31:0] io_wakeups_0_bits_fflags_bits_uop_debug_inst, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_is_rvc, // @[fp-pipeline.scala:36:14]
output [39:0] io_wakeups_0_bits_fflags_bits_uop_debug_pc, // @[fp-pipeline.scala:36:14]
output [2:0] io_wakeups_0_bits_fflags_bits_uop_iq_type, // @[fp-pipeline.scala:36:14]
output [9:0] io_wakeups_0_bits_fflags_bits_uop_fu_code, // @[fp-pipeline.scala:36:14]
output [3:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_br_type, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_op1_sel, // @[fp-pipeline.scala:36:14]
output [2:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_op2_sel, // @[fp-pipeline.scala:36:14]
output [2:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_imm_sel, // @[fp-pipeline.scala:36:14]
output [4:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_op_fcn, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_ctrl_fcn_dw, // @[fp-pipeline.scala:36:14]
output [2:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_csr_cmd, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_ctrl_is_load, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_ctrl_is_sta, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_ctrl_is_std, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_0_bits_fflags_bits_uop_iw_state, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_iw_p1_poisoned, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_iw_p2_poisoned, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_is_br, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_is_jalr, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_is_jal, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_is_sfb, // @[fp-pipeline.scala:36:14]
output [15:0] io_wakeups_0_bits_fflags_bits_uop_br_mask, // @[fp-pipeline.scala:36:14]
output [3:0] io_wakeups_0_bits_fflags_bits_uop_br_tag, // @[fp-pipeline.scala:36:14]
output [4:0] io_wakeups_0_bits_fflags_bits_uop_ftq_idx, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_edge_inst, // @[fp-pipeline.scala:36:14]
output [5:0] io_wakeups_0_bits_fflags_bits_uop_pc_lob, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_taken, // @[fp-pipeline.scala:36:14]
output [19:0] io_wakeups_0_bits_fflags_bits_uop_imm_packed, // @[fp-pipeline.scala:36:14]
output [11:0] io_wakeups_0_bits_fflags_bits_uop_csr_addr, // @[fp-pipeline.scala:36:14]
output [6:0] io_wakeups_0_bits_fflags_bits_uop_rob_idx, // @[fp-pipeline.scala:36:14]
output [4:0] io_wakeups_0_bits_fflags_bits_uop_ldq_idx, // @[fp-pipeline.scala:36:14]
output [4:0] io_wakeups_0_bits_fflags_bits_uop_stq_idx, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_0_bits_fflags_bits_uop_rxq_idx, // @[fp-pipeline.scala:36:14]
output [6:0] io_wakeups_0_bits_fflags_bits_uop_pdst, // @[fp-pipeline.scala:36:14]
output [6:0] io_wakeups_0_bits_fflags_bits_uop_prs1, // @[fp-pipeline.scala:36:14]
output [6:0] io_wakeups_0_bits_fflags_bits_uop_prs2, // @[fp-pipeline.scala:36:14]
output [6:0] io_wakeups_0_bits_fflags_bits_uop_prs3, // @[fp-pipeline.scala:36:14]
output [4:0] io_wakeups_0_bits_fflags_bits_uop_ppred, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_prs1_busy, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_prs2_busy, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_prs3_busy, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_ppred_busy, // @[fp-pipeline.scala:36:14]
output [6:0] io_wakeups_0_bits_fflags_bits_uop_stale_pdst, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_exception, // @[fp-pipeline.scala:36:14]
output [63:0] io_wakeups_0_bits_fflags_bits_uop_exc_cause, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_bypassable, // @[fp-pipeline.scala:36:14]
output [4:0] io_wakeups_0_bits_fflags_bits_uop_mem_cmd, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_0_bits_fflags_bits_uop_mem_size, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_mem_signed, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_is_fence, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_is_fencei, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_is_amo, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_uses_ldq, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_uses_stq, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_is_sys_pc2epc, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_is_unique, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_flush_on_commit, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_ldst_is_rs1, // @[fp-pipeline.scala:36:14]
output [5:0] io_wakeups_0_bits_fflags_bits_uop_ldst, // @[fp-pipeline.scala:36:14]
output [5:0] io_wakeups_0_bits_fflags_bits_uop_lrs1, // @[fp-pipeline.scala:36:14]
output [5:0] io_wakeups_0_bits_fflags_bits_uop_lrs2, // @[fp-pipeline.scala:36:14]
output [5:0] io_wakeups_0_bits_fflags_bits_uop_lrs3, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_ldst_val, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_0_bits_fflags_bits_uop_dst_rtype, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_0_bits_fflags_bits_uop_lrs1_rtype, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_0_bits_fflags_bits_uop_lrs2_rtype, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_frs3_en, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_fp_val, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_fp_single, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_xcpt_pf_if, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_xcpt_ae_if, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_xcpt_ma_if, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_bp_debug_if, // @[fp-pipeline.scala:36:14]
output io_wakeups_0_bits_fflags_bits_uop_bp_xcpt_if, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_0_bits_fflags_bits_uop_debug_fsrc, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_0_bits_fflags_bits_uop_debug_tsrc, // @[fp-pipeline.scala:36:14]
output [4:0] io_wakeups_0_bits_fflags_bits_flags, // @[fp-pipeline.scala:36:14]
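  // io_wakeups_1: second FP writeback/wakeup port (uop, 65-bit recoded data, and fflags).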
output io_wakeups_1_valid, // @[fp-pipeline.scala:36:14]
output [6:0] io_wakeups_1_bits_uop_uopc, // @[fp-pipeline.scala:36:14]
output [31:0] io_wakeups_1_bits_uop_inst, // @[fp-pipeline.scala:36:14]
output [31:0] io_wakeups_1_bits_uop_debug_inst, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_is_rvc, // @[fp-pipeline.scala:36:14]
output [39:0] io_wakeups_1_bits_uop_debug_pc, // @[fp-pipeline.scala:36:14]
output [2:0] io_wakeups_1_bits_uop_iq_type, // @[fp-pipeline.scala:36:14]
output [9:0] io_wakeups_1_bits_uop_fu_code, // @[fp-pipeline.scala:36:14]
output [3:0] io_wakeups_1_bits_uop_ctrl_br_type, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_1_bits_uop_ctrl_op1_sel, // @[fp-pipeline.scala:36:14]
output [2:0] io_wakeups_1_bits_uop_ctrl_op2_sel, // @[fp-pipeline.scala:36:14]
output [2:0] io_wakeups_1_bits_uop_ctrl_imm_sel, // @[fp-pipeline.scala:36:14]
output [4:0] io_wakeups_1_bits_uop_ctrl_op_fcn, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_ctrl_fcn_dw, // @[fp-pipeline.scala:36:14]
output [2:0] io_wakeups_1_bits_uop_ctrl_csr_cmd, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_ctrl_is_load, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_ctrl_is_sta, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_ctrl_is_std, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_1_bits_uop_iw_state, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_iw_p1_poisoned, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_iw_p2_poisoned, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_is_br, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_is_jalr, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_is_jal, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_is_sfb, // @[fp-pipeline.scala:36:14]
output [15:0] io_wakeups_1_bits_uop_br_mask, // @[fp-pipeline.scala:36:14]
output [3:0] io_wakeups_1_bits_uop_br_tag, // @[fp-pipeline.scala:36:14]
output [4:0] io_wakeups_1_bits_uop_ftq_idx, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_edge_inst, // @[fp-pipeline.scala:36:14]
output [5:0] io_wakeups_1_bits_uop_pc_lob, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_taken, // @[fp-pipeline.scala:36:14]
output [19:0] io_wakeups_1_bits_uop_imm_packed, // @[fp-pipeline.scala:36:14]
output [11:0] io_wakeups_1_bits_uop_csr_addr, // @[fp-pipeline.scala:36:14]
output [6:0] io_wakeups_1_bits_uop_rob_idx, // @[fp-pipeline.scala:36:14]
output [4:0] io_wakeups_1_bits_uop_ldq_idx, // @[fp-pipeline.scala:36:14]
output [4:0] io_wakeups_1_bits_uop_stq_idx, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_1_bits_uop_rxq_idx, // @[fp-pipeline.scala:36:14]
output [6:0] io_wakeups_1_bits_uop_pdst, // @[fp-pipeline.scala:36:14]
output [6:0] io_wakeups_1_bits_uop_prs1, // @[fp-pipeline.scala:36:14]
output [6:0] io_wakeups_1_bits_uop_prs2, // @[fp-pipeline.scala:36:14]
output [6:0] io_wakeups_1_bits_uop_prs3, // @[fp-pipeline.scala:36:14]
output [4:0] io_wakeups_1_bits_uop_ppred, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_prs1_busy, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_prs2_busy, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_prs3_busy, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_ppred_busy, // @[fp-pipeline.scala:36:14]
output [6:0] io_wakeups_1_bits_uop_stale_pdst, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_exception, // @[fp-pipeline.scala:36:14]
output [63:0] io_wakeups_1_bits_uop_exc_cause, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_bypassable, // @[fp-pipeline.scala:36:14]
output [4:0] io_wakeups_1_bits_uop_mem_cmd, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_1_bits_uop_mem_size, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_mem_signed, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_is_fence, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_is_fencei, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_is_amo, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_uses_ldq, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_uses_stq, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_is_sys_pc2epc, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_is_unique, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_flush_on_commit, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_ldst_is_rs1, // @[fp-pipeline.scala:36:14]
output [5:0] io_wakeups_1_bits_uop_ldst, // @[fp-pipeline.scala:36:14]
output [5:0] io_wakeups_1_bits_uop_lrs1, // @[fp-pipeline.scala:36:14]
output [5:0] io_wakeups_1_bits_uop_lrs2, // @[fp-pipeline.scala:36:14]
output [5:0] io_wakeups_1_bits_uop_lrs3, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_ldst_val, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_1_bits_uop_dst_rtype, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_1_bits_uop_lrs1_rtype, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_1_bits_uop_lrs2_rtype, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_frs3_en, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_fp_val, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_fp_single, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_xcpt_pf_if, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_xcpt_ae_if, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_xcpt_ma_if, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_bp_debug_if, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_uop_bp_xcpt_if, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_1_bits_uop_debug_fsrc, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_1_bits_uop_debug_tsrc, // @[fp-pipeline.scala:36:14]
output [64:0] io_wakeups_1_bits_data, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_valid, // @[fp-pipeline.scala:36:14]
output [6:0] io_wakeups_1_bits_fflags_bits_uop_uopc, // @[fp-pipeline.scala:36:14]
output [31:0] io_wakeups_1_bits_fflags_bits_uop_inst, // @[fp-pipeline.scala:36:14]
output [31:0] io_wakeups_1_bits_fflags_bits_uop_debug_inst, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_is_rvc, // @[fp-pipeline.scala:36:14]
output [39:0] io_wakeups_1_bits_fflags_bits_uop_debug_pc, // @[fp-pipeline.scala:36:14]
output [2:0] io_wakeups_1_bits_fflags_bits_uop_iq_type, // @[fp-pipeline.scala:36:14]
output [9:0] io_wakeups_1_bits_fflags_bits_uop_fu_code, // @[fp-pipeline.scala:36:14]
output [3:0] io_wakeups_1_bits_fflags_bits_uop_ctrl_br_type, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_1_bits_fflags_bits_uop_ctrl_op1_sel, // @[fp-pipeline.scala:36:14]
output [2:0] io_wakeups_1_bits_fflags_bits_uop_ctrl_op2_sel, // @[fp-pipeline.scala:36:14]
output [2:0] io_wakeups_1_bits_fflags_bits_uop_ctrl_imm_sel, // @[fp-pipeline.scala:36:14]
output [4:0] io_wakeups_1_bits_fflags_bits_uop_ctrl_op_fcn, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_ctrl_fcn_dw, // @[fp-pipeline.scala:36:14]
output [2:0] io_wakeups_1_bits_fflags_bits_uop_ctrl_csr_cmd, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_ctrl_is_load, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_ctrl_is_sta, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_ctrl_is_std, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_1_bits_fflags_bits_uop_iw_state, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_iw_p1_poisoned, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_iw_p2_poisoned, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_is_br, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_is_jalr, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_is_jal, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_is_sfb, // @[fp-pipeline.scala:36:14]
output [15:0] io_wakeups_1_bits_fflags_bits_uop_br_mask, // @[fp-pipeline.scala:36:14]
output [3:0] io_wakeups_1_bits_fflags_bits_uop_br_tag, // @[fp-pipeline.scala:36:14]
output [4:0] io_wakeups_1_bits_fflags_bits_uop_ftq_idx, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_edge_inst, // @[fp-pipeline.scala:36:14]
output [5:0] io_wakeups_1_bits_fflags_bits_uop_pc_lob, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_taken, // @[fp-pipeline.scala:36:14]
output [19:0] io_wakeups_1_bits_fflags_bits_uop_imm_packed, // @[fp-pipeline.scala:36:14]
output [11:0] io_wakeups_1_bits_fflags_bits_uop_csr_addr, // @[fp-pipeline.scala:36:14]
output [6:0] io_wakeups_1_bits_fflags_bits_uop_rob_idx, // @[fp-pipeline.scala:36:14]
output [4:0] io_wakeups_1_bits_fflags_bits_uop_ldq_idx, // @[fp-pipeline.scala:36:14]
output [4:0] io_wakeups_1_bits_fflags_bits_uop_stq_idx, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_1_bits_fflags_bits_uop_rxq_idx, // @[fp-pipeline.scala:36:14]
output [6:0] io_wakeups_1_bits_fflags_bits_uop_pdst, // @[fp-pipeline.scala:36:14]
output [6:0] io_wakeups_1_bits_fflags_bits_uop_prs1, // @[fp-pipeline.scala:36:14]
output [6:0] io_wakeups_1_bits_fflags_bits_uop_prs2, // @[fp-pipeline.scala:36:14]
output [6:0] io_wakeups_1_bits_fflags_bits_uop_prs3, // @[fp-pipeline.scala:36:14]
output [4:0] io_wakeups_1_bits_fflags_bits_uop_ppred, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_prs1_busy, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_prs2_busy, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_prs3_busy, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_ppred_busy, // @[fp-pipeline.scala:36:14]
output [6:0] io_wakeups_1_bits_fflags_bits_uop_stale_pdst, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_exception, // @[fp-pipeline.scala:36:14]
output [63:0] io_wakeups_1_bits_fflags_bits_uop_exc_cause, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_bypassable, // @[fp-pipeline.scala:36:14]
output [4:0] io_wakeups_1_bits_fflags_bits_uop_mem_cmd, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_1_bits_fflags_bits_uop_mem_size, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_mem_signed, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_is_fence, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_is_fencei, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_is_amo, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_uses_ldq, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_uses_stq, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_is_sys_pc2epc, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_is_unique, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_flush_on_commit, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_ldst_is_rs1, // @[fp-pipeline.scala:36:14]
output [5:0] io_wakeups_1_bits_fflags_bits_uop_ldst, // @[fp-pipeline.scala:36:14]
output [5:0] io_wakeups_1_bits_fflags_bits_uop_lrs1, // @[fp-pipeline.scala:36:14]
output [5:0] io_wakeups_1_bits_fflags_bits_uop_lrs2, // @[fp-pipeline.scala:36:14]
output [5:0] io_wakeups_1_bits_fflags_bits_uop_lrs3, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_ldst_val, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_1_bits_fflags_bits_uop_dst_rtype, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_1_bits_fflags_bits_uop_lrs1_rtype, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_1_bits_fflags_bits_uop_lrs2_rtype, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_frs3_en, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_fp_val, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_fp_single, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_xcpt_pf_if, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_xcpt_ae_if, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_xcpt_ma_if, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_bp_debug_if, // @[fp-pipeline.scala:36:14]
output io_wakeups_1_bits_fflags_bits_uop_bp_xcpt_if, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_1_bits_fflags_bits_uop_debug_fsrc, // @[fp-pipeline.scala:36:14]
output [1:0] io_wakeups_1_bits_fflags_bits_uop_debug_tsrc, // @[fp-pipeline.scala:36:14]
output [4:0] io_wakeups_1_bits_fflags_bits_flags, // @[fp-pipeline.scala:36:14]
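  // Debug: time-stamp counter input and writeback-data visibility for each write port.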
input [63:0] io_debug_tsc_reg, // @[fp-pipeline.scala:36:14]
output [64:0] io_debug_wb_wdata_0, // @[fp-pipeline.scala:36:14]
output [64:0] io_debug_wb_wdata_1 // @[fp-pipeline.scala:36:14]
);
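  // NaN classification of the data arriving at writeback-arbiter input 0, produced by the hardfloat rawFloatFromFN recoding logic.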
wire ll_wbarb_io_in_0_bits_data_rawIn_1_isNaN; // @[rawFloatFromFN.scala:63:19]
wire ll_wbarb_io_in_0_bits_data_rawIn_isNaN; // @[rawFloatFromFN.scala:63:19]
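  // Outputs of the long-latency writeback arbiter (ll_wbarb, fp-pipeline.scala:170).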
wire _ll_wbarb_io_out_valid; // @[fp-pipeline.scala:170:24]
wire [6:0] _ll_wbarb_io_out_bits_uop_pdst; // @[fp-pipeline.scala:170:24]
wire [1:0] _ll_wbarb_io_out_bits_uop_dst_rtype; // @[fp-pipeline.scala:170:24]
wire [64:0] _ll_wbarb_io_out_bits_data; // @[fp-pipeline.scala:170:24]
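  // Outputs of the FP register-read stage (fregister_read, fp-pipeline.scala:73): register-file read addresses and the issued execute request.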
wire [6:0] _fregister_read_io_rf_read_ports_0_addr; // @[fp-pipeline.scala:73:30]
wire [6:0] _fregister_read_io_rf_read_ports_1_addr; // @[fp-pipeline.scala:73:30]
wire [6:0] _fregister_read_io_rf_read_ports_2_addr; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_valid; // @[fp-pipeline.scala:73:30]
wire [6:0] _fregister_read_io_exe_reqs_0_bits_uop_uopc; // @[fp-pipeline.scala:73:30]
wire [31:0] _fregister_read_io_exe_reqs_0_bits_uop_inst; // @[fp-pipeline.scala:73:30]
wire [31:0] _fregister_read_io_exe_reqs_0_bits_uop_debug_inst; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_is_rvc; // @[fp-pipeline.scala:73:30]
wire [39:0] _fregister_read_io_exe_reqs_0_bits_uop_debug_pc; // @[fp-pipeline.scala:73:30]
wire [2:0] _fregister_read_io_exe_reqs_0_bits_uop_iq_type; // @[fp-pipeline.scala:73:30]
wire [9:0] _fregister_read_io_exe_reqs_0_bits_uop_fu_code; // @[fp-pipeline.scala:73:30]
wire [3:0] _fregister_read_io_exe_reqs_0_bits_uop_ctrl_br_type; // @[fp-pipeline.scala:73:30]
wire [1:0] _fregister_read_io_exe_reqs_0_bits_uop_ctrl_op1_sel; // @[fp-pipeline.scala:73:30]
wire [2:0] _fregister_read_io_exe_reqs_0_bits_uop_ctrl_op2_sel; // @[fp-pipeline.scala:73:30]
wire [2:0] _fregister_read_io_exe_reqs_0_bits_uop_ctrl_imm_sel; // @[fp-pipeline.scala:73:30]
wire [4:0] _fregister_read_io_exe_reqs_0_bits_uop_ctrl_op_fcn; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_ctrl_fcn_dw; // @[fp-pipeline.scala:73:30]
wire [2:0] _fregister_read_io_exe_reqs_0_bits_uop_ctrl_csr_cmd; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_ctrl_is_load; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_ctrl_is_sta; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_ctrl_is_std; // @[fp-pipeline.scala:73:30]
wire [1:0] _fregister_read_io_exe_reqs_0_bits_uop_iw_state; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_iw_p1_poisoned; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_iw_p2_poisoned; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_is_br; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_is_jalr; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_is_jal; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_is_sfb; // @[fp-pipeline.scala:73:30]
wire [15:0] _fregister_read_io_exe_reqs_0_bits_uop_br_mask; // @[fp-pipeline.scala:73:30]
wire [3:0] _fregister_read_io_exe_reqs_0_bits_uop_br_tag; // @[fp-pipeline.scala:73:30]
wire [4:0] _fregister_read_io_exe_reqs_0_bits_uop_ftq_idx; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_edge_inst; // @[fp-pipeline.scala:73:30]
wire [5:0] _fregister_read_io_exe_reqs_0_bits_uop_pc_lob; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_taken; // @[fp-pipeline.scala:73:30]
wire [19:0] _fregister_read_io_exe_reqs_0_bits_uop_imm_packed; // @[fp-pipeline.scala:73:30]
wire [11:0] _fregister_read_io_exe_reqs_0_bits_uop_csr_addr; // @[fp-pipeline.scala:73:30]
wire [6:0] _fregister_read_io_exe_reqs_0_bits_uop_rob_idx; // @[fp-pipeline.scala:73:30]
wire [4:0] _fregister_read_io_exe_reqs_0_bits_uop_ldq_idx; // @[fp-pipeline.scala:73:30]
wire [4:0] _fregister_read_io_exe_reqs_0_bits_uop_stq_idx; // @[fp-pipeline.scala:73:30]
wire [1:0] _fregister_read_io_exe_reqs_0_bits_uop_rxq_idx; // @[fp-pipeline.scala:73:30]
wire [6:0] _fregister_read_io_exe_reqs_0_bits_uop_pdst; // @[fp-pipeline.scala:73:30]
wire [6:0] _fregister_read_io_exe_reqs_0_bits_uop_prs1; // @[fp-pipeline.scala:73:30]
wire [6:0] _fregister_read_io_exe_reqs_0_bits_uop_prs2; // @[fp-pipeline.scala:73:30]
wire [6:0] _fregister_read_io_exe_reqs_0_bits_uop_prs3; // @[fp-pipeline.scala:73:30]
wire [4:0] _fregister_read_io_exe_reqs_0_bits_uop_ppred; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_prs1_busy; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_prs2_busy; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_prs3_busy; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_ppred_busy; // @[fp-pipeline.scala:73:30]
wire [6:0] _fregister_read_io_exe_reqs_0_bits_uop_stale_pdst; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_exception; // @[fp-pipeline.scala:73:30]
wire [63:0] _fregister_read_io_exe_reqs_0_bits_uop_exc_cause; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_bypassable; // @[fp-pipeline.scala:73:30]
wire [4:0] _fregister_read_io_exe_reqs_0_bits_uop_mem_cmd; // @[fp-pipeline.scala:73:30]
wire [1:0] _fregister_read_io_exe_reqs_0_bits_uop_mem_size; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_mem_signed; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_is_fence; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_is_fencei; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_is_amo; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_uses_ldq; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_uses_stq; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_is_sys_pc2epc; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_is_unique; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_flush_on_commit; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_ldst_is_rs1; // @[fp-pipeline.scala:73:30]
wire [5:0] _fregister_read_io_exe_reqs_0_bits_uop_ldst; // @[fp-pipeline.scala:73:30]
wire [5:0] _fregister_read_io_exe_reqs_0_bits_uop_lrs1; // @[fp-pipeline.scala:73:30]
wire [5:0] _fregister_read_io_exe_reqs_0_bits_uop_lrs2; // @[fp-pipeline.scala:73:30]
wire [5:0] _fregister_read_io_exe_reqs_0_bits_uop_lrs3; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_ldst_val; // @[fp-pipeline.scala:73:30]
wire [1:0] _fregister_read_io_exe_reqs_0_bits_uop_dst_rtype; // @[fp-pipeline.scala:73:30]
wire [1:0] _fregister_read_io_exe_reqs_0_bits_uop_lrs1_rtype; // @[fp-pipeline.scala:73:30]
wire [1:0] _fregister_read_io_exe_reqs_0_bits_uop_lrs2_rtype; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_frs3_en; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_fp_val; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_fp_single; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_xcpt_pf_if; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_xcpt_ae_if; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_xcpt_ma_if; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_bp_debug_if; // @[fp-pipeline.scala:73:30]
wire _fregister_read_io_exe_reqs_0_bits_uop_bp_xcpt_if; // @[fp-pipeline.scala:73:30]
wire [1:0] _fregister_read_io_exe_reqs_0_bits_uop_debug_fsrc; // @[fp-pipeline.scala:73:30]
wire [1:0] _fregister_read_io_exe_reqs_0_bits_uop_debug_tsrc; // @[fp-pipeline.scala:73:30]
wire [64:0] _fregister_read_io_exe_reqs_0_bits_rs1_data; // @[fp-pipeline.scala:73:30]
wire [64:0] _fregister_read_io_exe_reqs_0_bits_rs2_data; // @[fp-pipeline.scala:73:30]
wire [64:0] _fregister_read_io_exe_reqs_0_bits_rs3_data; // @[fp-pipeline.scala:73:30]
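// Read-port data returned by the floating-point register file instance (fregfile, fp-pipeline.scala:66)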
wire [64:0] _fregfile_io_read_ports_0_data; // @[fp-pipeline.scala:66:30]
wire [64:0] _fregfile_io_read_ports_1_data; // @[fp-pipeline.scala:66:30]
wire [64:0] _fregfile_io_read_ports_2_data; // @[fp-pipeline.scala:66:30]
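// Outputs of the FPU execution unit instance (fpu_exe_unit): FP response, long-latency integer response, and fflags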
wire [9:0] _fpu_exe_unit_io_fu_types; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_fresp_valid; // @[execution-units.scala:131:32]
wire [6:0] _fpu_exe_unit_io_fresp_bits_uop_pdst; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_fresp_bits_uop_is_amo; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_fresp_bits_uop_uses_ldq; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_fresp_bits_uop_uses_stq; // @[execution-units.scala:131:32]
wire [1:0] _fpu_exe_unit_io_fresp_bits_uop_dst_rtype; // @[execution-units.scala:131:32]
wire [64:0] _fpu_exe_unit_io_fresp_bits_data; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_valid; // @[execution-units.scala:131:32]
wire [6:0] _fpu_exe_unit_io_ll_iresp_bits_uop_uopc; // @[execution-units.scala:131:32]
wire [31:0] _fpu_exe_unit_io_ll_iresp_bits_uop_inst; // @[execution-units.scala:131:32]
wire [31:0] _fpu_exe_unit_io_ll_iresp_bits_uop_debug_inst; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_is_rvc; // @[execution-units.scala:131:32]
wire [39:0] _fpu_exe_unit_io_ll_iresp_bits_uop_debug_pc; // @[execution-units.scala:131:32]
wire [2:0] _fpu_exe_unit_io_ll_iresp_bits_uop_iq_type; // @[execution-units.scala:131:32]
wire [9:0] _fpu_exe_unit_io_ll_iresp_bits_uop_fu_code; // @[execution-units.scala:131:32]
wire [3:0] _fpu_exe_unit_io_ll_iresp_bits_uop_ctrl_br_type; // @[execution-units.scala:131:32]
wire [1:0] _fpu_exe_unit_io_ll_iresp_bits_uop_ctrl_op1_sel; // @[execution-units.scala:131:32]
wire [2:0] _fpu_exe_unit_io_ll_iresp_bits_uop_ctrl_op2_sel; // @[execution-units.scala:131:32]
wire [2:0] _fpu_exe_unit_io_ll_iresp_bits_uop_ctrl_imm_sel; // @[execution-units.scala:131:32]
wire [4:0] _fpu_exe_unit_io_ll_iresp_bits_uop_ctrl_op_fcn; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_ctrl_fcn_dw; // @[execution-units.scala:131:32]
wire [2:0] _fpu_exe_unit_io_ll_iresp_bits_uop_ctrl_csr_cmd; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_ctrl_is_load; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_ctrl_is_sta; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_ctrl_is_std; // @[execution-units.scala:131:32]
wire [1:0] _fpu_exe_unit_io_ll_iresp_bits_uop_iw_state; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_iw_p1_poisoned; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_iw_p2_poisoned; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_is_br; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_is_jalr; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_is_jal; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_is_sfb; // @[execution-units.scala:131:32]
wire [15:0] _fpu_exe_unit_io_ll_iresp_bits_uop_br_mask; // @[execution-units.scala:131:32]
wire [3:0] _fpu_exe_unit_io_ll_iresp_bits_uop_br_tag; // @[execution-units.scala:131:32]
wire [4:0] _fpu_exe_unit_io_ll_iresp_bits_uop_ftq_idx; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_edge_inst; // @[execution-units.scala:131:32]
wire [5:0] _fpu_exe_unit_io_ll_iresp_bits_uop_pc_lob; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_taken; // @[execution-units.scala:131:32]
wire [19:0] _fpu_exe_unit_io_ll_iresp_bits_uop_imm_packed; // @[execution-units.scala:131:32]
wire [11:0] _fpu_exe_unit_io_ll_iresp_bits_uop_csr_addr; // @[execution-units.scala:131:32]
wire [6:0] _fpu_exe_unit_io_ll_iresp_bits_uop_rob_idx; // @[execution-units.scala:131:32]
wire [4:0] _fpu_exe_unit_io_ll_iresp_bits_uop_ldq_idx; // @[execution-units.scala:131:32]
wire [4:0] _fpu_exe_unit_io_ll_iresp_bits_uop_stq_idx; // @[execution-units.scala:131:32]
wire [1:0] _fpu_exe_unit_io_ll_iresp_bits_uop_rxq_idx; // @[execution-units.scala:131:32]
wire [6:0] _fpu_exe_unit_io_ll_iresp_bits_uop_pdst; // @[execution-units.scala:131:32]
wire [6:0] _fpu_exe_unit_io_ll_iresp_bits_uop_prs1; // @[execution-units.scala:131:32]
wire [6:0] _fpu_exe_unit_io_ll_iresp_bits_uop_prs2; // @[execution-units.scala:131:32]
wire [6:0] _fpu_exe_unit_io_ll_iresp_bits_uop_prs3; // @[execution-units.scala:131:32]
wire [4:0] _fpu_exe_unit_io_ll_iresp_bits_uop_ppred; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_prs1_busy; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_prs2_busy; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_prs3_busy; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_ppred_busy; // @[execution-units.scala:131:32]
wire [6:0] _fpu_exe_unit_io_ll_iresp_bits_uop_stale_pdst; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_exception; // @[execution-units.scala:131:32]
wire [63:0] _fpu_exe_unit_io_ll_iresp_bits_uop_exc_cause; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_bypassable; // @[execution-units.scala:131:32]
wire [4:0] _fpu_exe_unit_io_ll_iresp_bits_uop_mem_cmd; // @[execution-units.scala:131:32]
wire [1:0] _fpu_exe_unit_io_ll_iresp_bits_uop_mem_size; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_mem_signed; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_is_fence; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_is_fencei; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_is_amo; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_uses_ldq; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_uses_stq; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_is_sys_pc2epc; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_is_unique; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_flush_on_commit; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_ldst_is_rs1; // @[execution-units.scala:131:32]
wire [5:0] _fpu_exe_unit_io_ll_iresp_bits_uop_ldst; // @[execution-units.scala:131:32]
wire [5:0] _fpu_exe_unit_io_ll_iresp_bits_uop_lrs1; // @[execution-units.scala:131:32]
wire [5:0] _fpu_exe_unit_io_ll_iresp_bits_uop_lrs2; // @[execution-units.scala:131:32]
wire [5:0] _fpu_exe_unit_io_ll_iresp_bits_uop_lrs3; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_ldst_val; // @[execution-units.scala:131:32]
wire [1:0] _fpu_exe_unit_io_ll_iresp_bits_uop_dst_rtype; // @[execution-units.scala:131:32]
wire [1:0] _fpu_exe_unit_io_ll_iresp_bits_uop_lrs1_rtype; // @[execution-units.scala:131:32]
wire [1:0] _fpu_exe_unit_io_ll_iresp_bits_uop_lrs2_rtype; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_frs3_en; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_fp_val; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_fp_single; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_xcpt_pf_if; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_xcpt_ae_if; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_xcpt_ma_if; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_bp_debug_if; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_uop_bp_xcpt_if; // @[execution-units.scala:131:32]
wire [1:0] _fpu_exe_unit_io_ll_iresp_bits_uop_debug_fsrc; // @[execution-units.scala:131:32]
wire [1:0] _fpu_exe_unit_io_ll_iresp_bits_uop_debug_tsrc; // @[execution-units.scala:131:32]
wire [64:0] _fpu_exe_unit_io_ll_iresp_bits_data; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_predicated; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_valid; // @[execution-units.scala:131:32]
wire [6:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_uopc; // @[execution-units.scala:131:32]
wire [31:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_inst; // @[execution-units.scala:131:32]
wire [31:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_debug_inst; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_is_rvc; // @[execution-units.scala:131:32]
wire [39:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_debug_pc; // @[execution-units.scala:131:32]
wire [2:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_iq_type; // @[execution-units.scala:131:32]
wire [9:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_fu_code; // @[execution-units.scala:131:32]
wire [3:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_ctrl_br_type; // @[execution-units.scala:131:32]
wire [1:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_ctrl_op1_sel; // @[execution-units.scala:131:32]
wire [2:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_ctrl_op2_sel; // @[execution-units.scala:131:32]
wire [2:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_ctrl_imm_sel; // @[execution-units.scala:131:32]
wire [4:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_ctrl_op_fcn; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_ctrl_fcn_dw; // @[execution-units.scala:131:32]
wire [2:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_ctrl_csr_cmd; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_ctrl_is_load; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_ctrl_is_sta; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_ctrl_is_std; // @[execution-units.scala:131:32]
wire [1:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_iw_state; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_iw_p1_poisoned; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_iw_p2_poisoned; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_is_br; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_is_jalr; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_is_jal; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_is_sfb; // @[execution-units.scala:131:32]
wire [15:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_br_mask; // @[execution-units.scala:131:32]
wire [3:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_br_tag; // @[execution-units.scala:131:32]
wire [4:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_ftq_idx; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_edge_inst; // @[execution-units.scala:131:32]
wire [5:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_pc_lob; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_taken; // @[execution-units.scala:131:32]
wire [19:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_imm_packed; // @[execution-units.scala:131:32]
wire [11:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_csr_addr; // @[execution-units.scala:131:32]
wire [6:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_rob_idx; // @[execution-units.scala:131:32]
wire [4:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_ldq_idx; // @[execution-units.scala:131:32]
wire [4:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_stq_idx; // @[execution-units.scala:131:32]
wire [1:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_rxq_idx; // @[execution-units.scala:131:32]
wire [6:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_pdst; // @[execution-units.scala:131:32]
wire [6:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_prs1; // @[execution-units.scala:131:32]
wire [6:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_prs2; // @[execution-units.scala:131:32]
wire [6:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_prs3; // @[execution-units.scala:131:32]
wire [4:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_ppred; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_prs1_busy; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_prs2_busy; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_prs3_busy; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_ppred_busy; // @[execution-units.scala:131:32]
wire [6:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_stale_pdst; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_exception; // @[execution-units.scala:131:32]
wire [63:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_exc_cause; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_bypassable; // @[execution-units.scala:131:32]
wire [4:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_mem_cmd; // @[execution-units.scala:131:32]
wire [1:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_mem_size; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_mem_signed; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_is_fence; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_is_fencei; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_is_amo; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_uses_ldq; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_uses_stq; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_is_sys_pc2epc; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_is_unique; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_flush_on_commit; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_ldst_is_rs1; // @[execution-units.scala:131:32]
wire [5:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_ldst; // @[execution-units.scala:131:32]
wire [5:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_lrs1; // @[execution-units.scala:131:32]
wire [5:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_lrs2; // @[execution-units.scala:131:32]
wire [5:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_lrs3; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_ldst_val; // @[execution-units.scala:131:32]
wire [1:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_dst_rtype; // @[execution-units.scala:131:32]
wire [1:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_lrs1_rtype; // @[execution-units.scala:131:32]
wire [1:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_lrs2_rtype; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_frs3_en; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_fp_val; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_fp_single; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_xcpt_pf_if; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_xcpt_ae_if; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_xcpt_ma_if; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_bp_debug_if; // @[execution-units.scala:131:32]
wire _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_bp_xcpt_if; // @[execution-units.scala:131:32]
wire [1:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_debug_fsrc; // @[execution-units.scala:131:32]
wire [1:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_uop_debug_tsrc; // @[execution-units.scala:131:32]
wire [4:0] _fpu_exe_unit_io_ll_iresp_bits_fflags_bits_flags; // @[execution-units.scala:131:32]
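// Copies of the branch-update input ports (io_brupdate), aliased with a _0 suffix as emitted by the generator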
wire [15:0] io_brupdate_b1_resolve_mask_0 = io_brupdate_b1_resolve_mask; // @[fp-pipeline.scala:28:7]
wire [15:0] io_brupdate_b1_mispredict_mask_0 = io_brupdate_b1_mispredict_mask; // @[fp-pipeline.scala:28:7]
wire [6:0] io_brupdate_b2_uop_uopc_0 = io_brupdate_b2_uop_uopc; // @[fp-pipeline.scala:28:7]
wire [31:0] io_brupdate_b2_uop_inst_0 = io_brupdate_b2_uop_inst; // @[fp-pipeline.scala:28:7]
wire [31:0] io_brupdate_b2_uop_debug_inst_0 = io_brupdate_b2_uop_debug_inst; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_is_rvc_0 = io_brupdate_b2_uop_is_rvc; // @[fp-pipeline.scala:28:7]
wire [39:0] io_brupdate_b2_uop_debug_pc_0 = io_brupdate_b2_uop_debug_pc; // @[fp-pipeline.scala:28:7]
wire [2:0] io_brupdate_b2_uop_iq_type_0 = io_brupdate_b2_uop_iq_type; // @[fp-pipeline.scala:28:7]
wire [9:0] io_brupdate_b2_uop_fu_code_0 = io_brupdate_b2_uop_fu_code; // @[fp-pipeline.scala:28:7]
wire [3:0] io_brupdate_b2_uop_ctrl_br_type_0 = io_brupdate_b2_uop_ctrl_br_type; // @[fp-pipeline.scala:28:7]
wire [1:0] io_brupdate_b2_uop_ctrl_op1_sel_0 = io_brupdate_b2_uop_ctrl_op1_sel; // @[fp-pipeline.scala:28:7]
wire [2:0] io_brupdate_b2_uop_ctrl_op2_sel_0 = io_brupdate_b2_uop_ctrl_op2_sel; // @[fp-pipeline.scala:28:7]
wire [2:0] io_brupdate_b2_uop_ctrl_imm_sel_0 = io_brupdate_b2_uop_ctrl_imm_sel; // @[fp-pipeline.scala:28:7]
wire [4:0] io_brupdate_b2_uop_ctrl_op_fcn_0 = io_brupdate_b2_uop_ctrl_op_fcn; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_ctrl_fcn_dw_0 = io_brupdate_b2_uop_ctrl_fcn_dw; // @[fp-pipeline.scala:28:7]
wire [2:0] io_brupdate_b2_uop_ctrl_csr_cmd_0 = io_brupdate_b2_uop_ctrl_csr_cmd; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_ctrl_is_load_0 = io_brupdate_b2_uop_ctrl_is_load; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_ctrl_is_sta_0 = io_brupdate_b2_uop_ctrl_is_sta; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_ctrl_is_std_0 = io_brupdate_b2_uop_ctrl_is_std; // @[fp-pipeline.scala:28:7]
wire [1:0] io_brupdate_b2_uop_iw_state_0 = io_brupdate_b2_uop_iw_state; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_iw_p1_poisoned_0 = io_brupdate_b2_uop_iw_p1_poisoned; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_iw_p2_poisoned_0 = io_brupdate_b2_uop_iw_p2_poisoned; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_is_br_0 = io_brupdate_b2_uop_is_br; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_is_jalr_0 = io_brupdate_b2_uop_is_jalr; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_is_jal_0 = io_brupdate_b2_uop_is_jal; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_is_sfb_0 = io_brupdate_b2_uop_is_sfb; // @[fp-pipeline.scala:28:7]
wire [15:0] io_brupdate_b2_uop_br_mask_0 = io_brupdate_b2_uop_br_mask; // @[fp-pipeline.scala:28:7]
wire [3:0] io_brupdate_b2_uop_br_tag_0 = io_brupdate_b2_uop_br_tag; // @[fp-pipeline.scala:28:7]
wire [4:0] io_brupdate_b2_uop_ftq_idx_0 = io_brupdate_b2_uop_ftq_idx; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_edge_inst_0 = io_brupdate_b2_uop_edge_inst; // @[fp-pipeline.scala:28:7]
wire [5:0] io_brupdate_b2_uop_pc_lob_0 = io_brupdate_b2_uop_pc_lob; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_taken_0 = io_brupdate_b2_uop_taken; // @[fp-pipeline.scala:28:7]
wire [19:0] io_brupdate_b2_uop_imm_packed_0 = io_brupdate_b2_uop_imm_packed; // @[fp-pipeline.scala:28:7]
wire [11:0] io_brupdate_b2_uop_csr_addr_0 = io_brupdate_b2_uop_csr_addr; // @[fp-pipeline.scala:28:7]
wire [6:0] io_brupdate_b2_uop_rob_idx_0 = io_brupdate_b2_uop_rob_idx; // @[fp-pipeline.scala:28:7]
wire [4:0] io_brupdate_b2_uop_ldq_idx_0 = io_brupdate_b2_uop_ldq_idx; // @[fp-pipeline.scala:28:7]
wire [4:0] io_brupdate_b2_uop_stq_idx_0 = io_brupdate_b2_uop_stq_idx; // @[fp-pipeline.scala:28:7]
wire [1:0] io_brupdate_b2_uop_rxq_idx_0 = io_brupdate_b2_uop_rxq_idx; // @[fp-pipeline.scala:28:7]
wire [6:0] io_brupdate_b2_uop_pdst_0 = io_brupdate_b2_uop_pdst; // @[fp-pipeline.scala:28:7]
wire [6:0] io_brupdate_b2_uop_prs1_0 = io_brupdate_b2_uop_prs1; // @[fp-pipeline.scala:28:7]
wire [6:0] io_brupdate_b2_uop_prs2_0 = io_brupdate_b2_uop_prs2; // @[fp-pipeline.scala:28:7]
wire [6:0] io_brupdate_b2_uop_prs3_0 = io_brupdate_b2_uop_prs3; // @[fp-pipeline.scala:28:7]
wire [4:0] io_brupdate_b2_uop_ppred_0 = io_brupdate_b2_uop_ppred; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_prs1_busy_0 = io_brupdate_b2_uop_prs1_busy; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_prs2_busy_0 = io_brupdate_b2_uop_prs2_busy; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_prs3_busy_0 = io_brupdate_b2_uop_prs3_busy; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_ppred_busy_0 = io_brupdate_b2_uop_ppred_busy; // @[fp-pipeline.scala:28:7]
wire [6:0] io_brupdate_b2_uop_stale_pdst_0 = io_brupdate_b2_uop_stale_pdst; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_exception_0 = io_brupdate_b2_uop_exception; // @[fp-pipeline.scala:28:7]
wire [63:0] io_brupdate_b2_uop_exc_cause_0 = io_brupdate_b2_uop_exc_cause; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_bypassable_0 = io_brupdate_b2_uop_bypassable; // @[fp-pipeline.scala:28:7]
wire [4:0] io_brupdate_b2_uop_mem_cmd_0 = io_brupdate_b2_uop_mem_cmd; // @[fp-pipeline.scala:28:7]
wire [1:0] io_brupdate_b2_uop_mem_size_0 = io_brupdate_b2_uop_mem_size; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_mem_signed_0 = io_brupdate_b2_uop_mem_signed; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_is_fence_0 = io_brupdate_b2_uop_is_fence; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_is_fencei_0 = io_brupdate_b2_uop_is_fencei; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_is_amo_0 = io_brupdate_b2_uop_is_amo; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_uses_ldq_0 = io_brupdate_b2_uop_uses_ldq; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_uses_stq_0 = io_brupdate_b2_uop_uses_stq; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_is_sys_pc2epc_0 = io_brupdate_b2_uop_is_sys_pc2epc; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_is_unique_0 = io_brupdate_b2_uop_is_unique; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_flush_on_commit_0 = io_brupdate_b2_uop_flush_on_commit; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_ldst_is_rs1_0 = io_brupdate_b2_uop_ldst_is_rs1; // @[fp-pipeline.scala:28:7]
wire [5:0] io_brupdate_b2_uop_ldst_0 = io_brupdate_b2_uop_ldst; // @[fp-pipeline.scala:28:7]
wire [5:0] io_brupdate_b2_uop_lrs1_0 = io_brupdate_b2_uop_lrs1; // @[fp-pipeline.scala:28:7]
wire [5:0] io_brupdate_b2_uop_lrs2_0 = io_brupdate_b2_uop_lrs2; // @[fp-pipeline.scala:28:7]
wire [5:0] io_brupdate_b2_uop_lrs3_0 = io_brupdate_b2_uop_lrs3; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_ldst_val_0 = io_brupdate_b2_uop_ldst_val; // @[fp-pipeline.scala:28:7]
wire [1:0] io_brupdate_b2_uop_dst_rtype_0 = io_brupdate_b2_uop_dst_rtype; // @[fp-pipeline.scala:28:7]
wire [1:0] io_brupdate_b2_uop_lrs1_rtype_0 = io_brupdate_b2_uop_lrs1_rtype; // @[fp-pipeline.scala:28:7]
wire [1:0] io_brupdate_b2_uop_lrs2_rtype_0 = io_brupdate_b2_uop_lrs2_rtype; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_frs3_en_0 = io_brupdate_b2_uop_frs3_en; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_fp_val_0 = io_brupdate_b2_uop_fp_val; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_fp_single_0 = io_brupdate_b2_uop_fp_single; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_xcpt_pf_if_0 = io_brupdate_b2_uop_xcpt_pf_if; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_xcpt_ae_if_0 = io_brupdate_b2_uop_xcpt_ae_if; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_xcpt_ma_if_0 = io_brupdate_b2_uop_xcpt_ma_if; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_bp_debug_if_0 = io_brupdate_b2_uop_bp_debug_if; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_uop_bp_xcpt_if_0 = io_brupdate_b2_uop_bp_xcpt_if; // @[fp-pipeline.scala:28:7]
wire [1:0] io_brupdate_b2_uop_debug_fsrc_0 = io_brupdate_b2_uop_debug_fsrc; // @[fp-pipeline.scala:28:7]
wire [1:0] io_brupdate_b2_uop_debug_tsrc_0 = io_brupdate_b2_uop_debug_tsrc; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_valid_0 = io_brupdate_b2_valid; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_mispredict_0 = io_brupdate_b2_mispredict; // @[fp-pipeline.scala:28:7]
wire io_brupdate_b2_taken_0 = io_brupdate_b2_taken; // @[fp-pipeline.scala:28:7]
wire [2:0] io_brupdate_b2_cfi_type_0 = io_brupdate_b2_cfi_type; // @[fp-pipeline.scala:28:7]
wire [1:0] io_brupdate_b2_pc_sel_0 = io_brupdate_b2_pc_sel; // @[fp-pipeline.scala:28:7]
wire [39:0] io_brupdate_b2_jalr_target_0 = io_brupdate_b2_jalr_target; // @[fp-pipeline.scala:28:7]
wire [20:0] io_brupdate_b2_target_offset_0 = io_brupdate_b2_target_offset; // @[fp-pipeline.scala:28:7]
wire io_flush_pipeline_0 = io_flush_pipeline; // @[fp-pipeline.scala:28:7]
wire [2:0] io_fcsr_rm_0 = io_fcsr_rm; // @[fp-pipeline.scala:28:7]
wire io_status_debug_0 = io_status_debug; // @[fp-pipeline.scala:28:7]
wire io_status_cease_0 = io_status_cease; // @[fp-pipeline.scala:28:7]
wire io_status_wfi_0 = io_status_wfi; // @[fp-pipeline.scala:28:7]
wire [1:0] io_status_dprv_0 = io_status_dprv; // @[fp-pipeline.scala:28:7]
wire io_status_dv_0 = io_status_dv; // @[fp-pipeline.scala:28:7]
wire [1:0] io_status_prv_0 = io_status_prv; // @[fp-pipeline.scala:28:7]
wire io_status_v_0 = io_status_v; // @[fp-pipeline.scala:28:7]
wire io_status_sd_0 = io_status_sd; // @[fp-pipeline.scala:28:7]
wire io_status_mpv_0 = io_status_mpv; // @[fp-pipeline.scala:28:7]
wire io_status_gva_0 = io_status_gva; // @[fp-pipeline.scala:28:7]
wire io_status_tsr_0 = io_status_tsr; // @[fp-pipeline.scala:28:7]
wire io_status_tw_0 = io_status_tw; // @[fp-pipeline.scala:28:7]
wire io_status_tvm_0 = io_status_tvm; // @[fp-pipeline.scala:28:7]
wire io_status_mxr_0 = io_status_mxr; // @[fp-pipeline.scala:28:7]
wire io_status_sum_0 = io_status_sum; // @[fp-pipeline.scala:28:7]
wire io_status_mprv_0 = io_status_mprv; // @[fp-pipeline.scala:28:7]
wire [1:0] io_status_fs_0 = io_status_fs; // @[fp-pipeline.scala:28:7]
wire [1:0] io_status_mpp_0 = io_status_mpp; // @[fp-pipeline.scala:28:7]
wire io_status_spp_0 = io_status_spp; // @[fp-pipeline.scala:28:7]
wire io_status_mpie_0 = io_status_mpie; // @[fp-pipeline.scala:28:7]
wire io_status_spie_0 = io_status_spie; // @[fp-pipeline.scala:28:7]
wire io_status_mie_0 = io_status_mie; // @[fp-pipeline.scala:28:7]
wire io_status_sie_0 = io_status_sie; // @[fp-pipeline.scala:28:7]
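// Copies of the dispatch micro-op input ports (io_dis_uops_0/1/2)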
wire io_dis_uops_0_valid_0 = io_dis_uops_0_valid; // @[fp-pipeline.scala:28:7]
wire [6:0] io_dis_uops_0_bits_uopc_0 = io_dis_uops_0_bits_uopc; // @[fp-pipeline.scala:28:7]
wire [31:0] io_dis_uops_0_bits_inst_0 = io_dis_uops_0_bits_inst; // @[fp-pipeline.scala:28:7]
wire [31:0] io_dis_uops_0_bits_debug_inst_0 = io_dis_uops_0_bits_debug_inst; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_is_rvc_0 = io_dis_uops_0_bits_is_rvc; // @[fp-pipeline.scala:28:7]
wire [39:0] io_dis_uops_0_bits_debug_pc_0 = io_dis_uops_0_bits_debug_pc; // @[fp-pipeline.scala:28:7]
wire [2:0] io_dis_uops_0_bits_iq_type_0 = io_dis_uops_0_bits_iq_type; // @[fp-pipeline.scala:28:7]
wire [9:0] io_dis_uops_0_bits_fu_code_0 = io_dis_uops_0_bits_fu_code; // @[fp-pipeline.scala:28:7]
wire [3:0] io_dis_uops_0_bits_ctrl_br_type_0 = io_dis_uops_0_bits_ctrl_br_type; // @[fp-pipeline.scala:28:7]
wire [1:0] io_dis_uops_0_bits_ctrl_op1_sel_0 = io_dis_uops_0_bits_ctrl_op1_sel; // @[fp-pipeline.scala:28:7]
wire [2:0] io_dis_uops_0_bits_ctrl_op2_sel_0 = io_dis_uops_0_bits_ctrl_op2_sel; // @[fp-pipeline.scala:28:7]
wire [2:0] io_dis_uops_0_bits_ctrl_imm_sel_0 = io_dis_uops_0_bits_ctrl_imm_sel; // @[fp-pipeline.scala:28:7]
wire [4:0] io_dis_uops_0_bits_ctrl_op_fcn_0 = io_dis_uops_0_bits_ctrl_op_fcn; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_ctrl_fcn_dw_0 = io_dis_uops_0_bits_ctrl_fcn_dw; // @[fp-pipeline.scala:28:7]
wire [2:0] io_dis_uops_0_bits_ctrl_csr_cmd_0 = io_dis_uops_0_bits_ctrl_csr_cmd; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_ctrl_is_load_0 = io_dis_uops_0_bits_ctrl_is_load; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_ctrl_is_sta_0 = io_dis_uops_0_bits_ctrl_is_sta; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_ctrl_is_std_0 = io_dis_uops_0_bits_ctrl_is_std; // @[fp-pipeline.scala:28:7]
wire [1:0] io_dis_uops_0_bits_iw_state_0 = io_dis_uops_0_bits_iw_state; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_iw_p1_poisoned_0 = io_dis_uops_0_bits_iw_p1_poisoned; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_iw_p2_poisoned_0 = io_dis_uops_0_bits_iw_p2_poisoned; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_is_br_0 = io_dis_uops_0_bits_is_br; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_is_jalr_0 = io_dis_uops_0_bits_is_jalr; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_is_jal_0 = io_dis_uops_0_bits_is_jal; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_is_sfb_0 = io_dis_uops_0_bits_is_sfb; // @[fp-pipeline.scala:28:7]
wire [15:0] io_dis_uops_0_bits_br_mask_0 = io_dis_uops_0_bits_br_mask; // @[fp-pipeline.scala:28:7]
wire [3:0] io_dis_uops_0_bits_br_tag_0 = io_dis_uops_0_bits_br_tag; // @[fp-pipeline.scala:28:7]
wire [4:0] io_dis_uops_0_bits_ftq_idx_0 = io_dis_uops_0_bits_ftq_idx; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_edge_inst_0 = io_dis_uops_0_bits_edge_inst; // @[fp-pipeline.scala:28:7]
wire [5:0] io_dis_uops_0_bits_pc_lob_0 = io_dis_uops_0_bits_pc_lob; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_taken_0 = io_dis_uops_0_bits_taken; // @[fp-pipeline.scala:28:7]
wire [19:0] io_dis_uops_0_bits_imm_packed_0 = io_dis_uops_0_bits_imm_packed; // @[fp-pipeline.scala:28:7]
wire [11:0] io_dis_uops_0_bits_csr_addr_0 = io_dis_uops_0_bits_csr_addr; // @[fp-pipeline.scala:28:7]
wire [6:0] io_dis_uops_0_bits_rob_idx_0 = io_dis_uops_0_bits_rob_idx; // @[fp-pipeline.scala:28:7]
wire [4:0] io_dis_uops_0_bits_ldq_idx_0 = io_dis_uops_0_bits_ldq_idx; // @[fp-pipeline.scala:28:7]
wire [4:0] io_dis_uops_0_bits_stq_idx_0 = io_dis_uops_0_bits_stq_idx; // @[fp-pipeline.scala:28:7]
wire [1:0] io_dis_uops_0_bits_rxq_idx_0 = io_dis_uops_0_bits_rxq_idx; // @[fp-pipeline.scala:28:7]
wire [6:0] io_dis_uops_0_bits_pdst_0 = io_dis_uops_0_bits_pdst; // @[fp-pipeline.scala:28:7]
wire [6:0] io_dis_uops_0_bits_prs1_0 = io_dis_uops_0_bits_prs1; // @[fp-pipeline.scala:28:7]
wire [6:0] io_dis_uops_0_bits_prs2_0 = io_dis_uops_0_bits_prs2; // @[fp-pipeline.scala:28:7]
wire [6:0] io_dis_uops_0_bits_prs3_0 = io_dis_uops_0_bits_prs3; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_prs1_busy_0 = io_dis_uops_0_bits_prs1_busy; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_prs2_busy_0 = io_dis_uops_0_bits_prs2_busy; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_prs3_busy_0 = io_dis_uops_0_bits_prs3_busy; // @[fp-pipeline.scala:28:7]
wire [6:0] io_dis_uops_0_bits_stale_pdst_0 = io_dis_uops_0_bits_stale_pdst; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_exception_0 = io_dis_uops_0_bits_exception; // @[fp-pipeline.scala:28:7]
wire [63:0] io_dis_uops_0_bits_exc_cause_0 = io_dis_uops_0_bits_exc_cause; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_bypassable_0 = io_dis_uops_0_bits_bypassable; // @[fp-pipeline.scala:28:7]
wire [4:0] io_dis_uops_0_bits_mem_cmd_0 = io_dis_uops_0_bits_mem_cmd; // @[fp-pipeline.scala:28:7]
wire [1:0] io_dis_uops_0_bits_mem_size_0 = io_dis_uops_0_bits_mem_size; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_mem_signed_0 = io_dis_uops_0_bits_mem_signed; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_is_fence_0 = io_dis_uops_0_bits_is_fence; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_is_fencei_0 = io_dis_uops_0_bits_is_fencei; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_is_amo_0 = io_dis_uops_0_bits_is_amo; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_uses_ldq_0 = io_dis_uops_0_bits_uses_ldq; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_uses_stq_0 = io_dis_uops_0_bits_uses_stq; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_is_sys_pc2epc_0 = io_dis_uops_0_bits_is_sys_pc2epc; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_is_unique_0 = io_dis_uops_0_bits_is_unique; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_flush_on_commit_0 = io_dis_uops_0_bits_flush_on_commit; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_ldst_is_rs1_0 = io_dis_uops_0_bits_ldst_is_rs1; // @[fp-pipeline.scala:28:7]
wire [5:0] io_dis_uops_0_bits_ldst_0 = io_dis_uops_0_bits_ldst; // @[fp-pipeline.scala:28:7]
wire [5:0] io_dis_uops_0_bits_lrs1_0 = io_dis_uops_0_bits_lrs1; // @[fp-pipeline.scala:28:7]
wire [5:0] io_dis_uops_0_bits_lrs2_0 = io_dis_uops_0_bits_lrs2; // @[fp-pipeline.scala:28:7]
wire [5:0] io_dis_uops_0_bits_lrs3_0 = io_dis_uops_0_bits_lrs3; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_ldst_val_0 = io_dis_uops_0_bits_ldst_val; // @[fp-pipeline.scala:28:7]
wire [1:0] io_dis_uops_0_bits_dst_rtype_0 = io_dis_uops_0_bits_dst_rtype; // @[fp-pipeline.scala:28:7]
wire [1:0] io_dis_uops_0_bits_lrs1_rtype_0 = io_dis_uops_0_bits_lrs1_rtype; // @[fp-pipeline.scala:28:7]
wire [1:0] io_dis_uops_0_bits_lrs2_rtype_0 = io_dis_uops_0_bits_lrs2_rtype; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_frs3_en_0 = io_dis_uops_0_bits_frs3_en; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_fp_val_0 = io_dis_uops_0_bits_fp_val; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_fp_single_0 = io_dis_uops_0_bits_fp_single; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_xcpt_pf_if_0 = io_dis_uops_0_bits_xcpt_pf_if; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_xcpt_ae_if_0 = io_dis_uops_0_bits_xcpt_ae_if; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_xcpt_ma_if_0 = io_dis_uops_0_bits_xcpt_ma_if; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_bp_debug_if_0 = io_dis_uops_0_bits_bp_debug_if; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_bp_xcpt_if_0 = io_dis_uops_0_bits_bp_xcpt_if; // @[fp-pipeline.scala:28:7]
wire [1:0] io_dis_uops_0_bits_debug_fsrc_0 = io_dis_uops_0_bits_debug_fsrc; // @[fp-pipeline.scala:28:7]
wire [1:0] io_dis_uops_0_bits_debug_tsrc_0 = io_dis_uops_0_bits_debug_tsrc; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_valid_0 = io_dis_uops_1_valid; // @[fp-pipeline.scala:28:7]
wire [6:0] io_dis_uops_1_bits_uopc_0 = io_dis_uops_1_bits_uopc; // @[fp-pipeline.scala:28:7]
wire [31:0] io_dis_uops_1_bits_inst_0 = io_dis_uops_1_bits_inst; // @[fp-pipeline.scala:28:7]
wire [31:0] io_dis_uops_1_bits_debug_inst_0 = io_dis_uops_1_bits_debug_inst; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_is_rvc_0 = io_dis_uops_1_bits_is_rvc; // @[fp-pipeline.scala:28:7]
wire [39:0] io_dis_uops_1_bits_debug_pc_0 = io_dis_uops_1_bits_debug_pc; // @[fp-pipeline.scala:28:7]
wire [2:0] io_dis_uops_1_bits_iq_type_0 = io_dis_uops_1_bits_iq_type; // @[fp-pipeline.scala:28:7]
wire [9:0] io_dis_uops_1_bits_fu_code_0 = io_dis_uops_1_bits_fu_code; // @[fp-pipeline.scala:28:7]
wire [3:0] io_dis_uops_1_bits_ctrl_br_type_0 = io_dis_uops_1_bits_ctrl_br_type; // @[fp-pipeline.scala:28:7]
wire [1:0] io_dis_uops_1_bits_ctrl_op1_sel_0 = io_dis_uops_1_bits_ctrl_op1_sel; // @[fp-pipeline.scala:28:7]
wire [2:0] io_dis_uops_1_bits_ctrl_op2_sel_0 = io_dis_uops_1_bits_ctrl_op2_sel; // @[fp-pipeline.scala:28:7]
wire [2:0] io_dis_uops_1_bits_ctrl_imm_sel_0 = io_dis_uops_1_bits_ctrl_imm_sel; // @[fp-pipeline.scala:28:7]
wire [4:0] io_dis_uops_1_bits_ctrl_op_fcn_0 = io_dis_uops_1_bits_ctrl_op_fcn; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_ctrl_fcn_dw_0 = io_dis_uops_1_bits_ctrl_fcn_dw; // @[fp-pipeline.scala:28:7]
wire [2:0] io_dis_uops_1_bits_ctrl_csr_cmd_0 = io_dis_uops_1_bits_ctrl_csr_cmd; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_ctrl_is_load_0 = io_dis_uops_1_bits_ctrl_is_load; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_ctrl_is_sta_0 = io_dis_uops_1_bits_ctrl_is_sta; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_ctrl_is_std_0 = io_dis_uops_1_bits_ctrl_is_std; // @[fp-pipeline.scala:28:7]
wire [1:0] io_dis_uops_1_bits_iw_state_0 = io_dis_uops_1_bits_iw_state; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_iw_p1_poisoned_0 = io_dis_uops_1_bits_iw_p1_poisoned; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_iw_p2_poisoned_0 = io_dis_uops_1_bits_iw_p2_poisoned; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_is_br_0 = io_dis_uops_1_bits_is_br; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_is_jalr_0 = io_dis_uops_1_bits_is_jalr; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_is_jal_0 = io_dis_uops_1_bits_is_jal; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_is_sfb_0 = io_dis_uops_1_bits_is_sfb; // @[fp-pipeline.scala:28:7]
wire [15:0] io_dis_uops_1_bits_br_mask_0 = io_dis_uops_1_bits_br_mask; // @[fp-pipeline.scala:28:7]
wire [3:0] io_dis_uops_1_bits_br_tag_0 = io_dis_uops_1_bits_br_tag; // @[fp-pipeline.scala:28:7]
wire [4:0] io_dis_uops_1_bits_ftq_idx_0 = io_dis_uops_1_bits_ftq_idx; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_edge_inst_0 = io_dis_uops_1_bits_edge_inst; // @[fp-pipeline.scala:28:7]
wire [5:0] io_dis_uops_1_bits_pc_lob_0 = io_dis_uops_1_bits_pc_lob; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_taken_0 = io_dis_uops_1_bits_taken; // @[fp-pipeline.scala:28:7]
wire [19:0] io_dis_uops_1_bits_imm_packed_0 = io_dis_uops_1_bits_imm_packed; // @[fp-pipeline.scala:28:7]
wire [11:0] io_dis_uops_1_bits_csr_addr_0 = io_dis_uops_1_bits_csr_addr; // @[fp-pipeline.scala:28:7]
wire [6:0] io_dis_uops_1_bits_rob_idx_0 = io_dis_uops_1_bits_rob_idx; // @[fp-pipeline.scala:28:7]
wire [4:0] io_dis_uops_1_bits_ldq_idx_0 = io_dis_uops_1_bits_ldq_idx; // @[fp-pipeline.scala:28:7]
wire [4:0] io_dis_uops_1_bits_stq_idx_0 = io_dis_uops_1_bits_stq_idx; // @[fp-pipeline.scala:28:7]
wire [1:0] io_dis_uops_1_bits_rxq_idx_0 = io_dis_uops_1_bits_rxq_idx; // @[fp-pipeline.scala:28:7]
wire [6:0] io_dis_uops_1_bits_pdst_0 = io_dis_uops_1_bits_pdst; // @[fp-pipeline.scala:28:7]
wire [6:0] io_dis_uops_1_bits_prs1_0 = io_dis_uops_1_bits_prs1; // @[fp-pipeline.scala:28:7]
wire [6:0] io_dis_uops_1_bits_prs2_0 = io_dis_uops_1_bits_prs2; // @[fp-pipeline.scala:28:7]
wire [6:0] io_dis_uops_1_bits_prs3_0 = io_dis_uops_1_bits_prs3; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_prs1_busy_0 = io_dis_uops_1_bits_prs1_busy; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_prs2_busy_0 = io_dis_uops_1_bits_prs2_busy; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_prs3_busy_0 = io_dis_uops_1_bits_prs3_busy; // @[fp-pipeline.scala:28:7]
wire [6:0] io_dis_uops_1_bits_stale_pdst_0 = io_dis_uops_1_bits_stale_pdst; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_exception_0 = io_dis_uops_1_bits_exception; // @[fp-pipeline.scala:28:7]
wire [63:0] io_dis_uops_1_bits_exc_cause_0 = io_dis_uops_1_bits_exc_cause; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_bypassable_0 = io_dis_uops_1_bits_bypassable; // @[fp-pipeline.scala:28:7]
wire [4:0] io_dis_uops_1_bits_mem_cmd_0 = io_dis_uops_1_bits_mem_cmd; // @[fp-pipeline.scala:28:7]
wire [1:0] io_dis_uops_1_bits_mem_size_0 = io_dis_uops_1_bits_mem_size; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_mem_signed_0 = io_dis_uops_1_bits_mem_signed; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_is_fence_0 = io_dis_uops_1_bits_is_fence; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_is_fencei_0 = io_dis_uops_1_bits_is_fencei; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_is_amo_0 = io_dis_uops_1_bits_is_amo; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_uses_ldq_0 = io_dis_uops_1_bits_uses_ldq; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_uses_stq_0 = io_dis_uops_1_bits_uses_stq; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_is_sys_pc2epc_0 = io_dis_uops_1_bits_is_sys_pc2epc; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_is_unique_0 = io_dis_uops_1_bits_is_unique; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_flush_on_commit_0 = io_dis_uops_1_bits_flush_on_commit; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_ldst_is_rs1_0 = io_dis_uops_1_bits_ldst_is_rs1; // @[fp-pipeline.scala:28:7]
wire [5:0] io_dis_uops_1_bits_ldst_0 = io_dis_uops_1_bits_ldst; // @[fp-pipeline.scala:28:7]
wire [5:0] io_dis_uops_1_bits_lrs1_0 = io_dis_uops_1_bits_lrs1; // @[fp-pipeline.scala:28:7]
wire [5:0] io_dis_uops_1_bits_lrs2_0 = io_dis_uops_1_bits_lrs2; // @[fp-pipeline.scala:28:7]
wire [5:0] io_dis_uops_1_bits_lrs3_0 = io_dis_uops_1_bits_lrs3; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_ldst_val_0 = io_dis_uops_1_bits_ldst_val; // @[fp-pipeline.scala:28:7]
wire [1:0] io_dis_uops_1_bits_dst_rtype_0 = io_dis_uops_1_bits_dst_rtype; // @[fp-pipeline.scala:28:7]
wire [1:0] io_dis_uops_1_bits_lrs1_rtype_0 = io_dis_uops_1_bits_lrs1_rtype; // @[fp-pipeline.scala:28:7]
wire [1:0] io_dis_uops_1_bits_lrs2_rtype_0 = io_dis_uops_1_bits_lrs2_rtype; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_frs3_en_0 = io_dis_uops_1_bits_frs3_en; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_fp_val_0 = io_dis_uops_1_bits_fp_val; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_fp_single_0 = io_dis_uops_1_bits_fp_single; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_xcpt_pf_if_0 = io_dis_uops_1_bits_xcpt_pf_if; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_xcpt_ae_if_0 = io_dis_uops_1_bits_xcpt_ae_if; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_xcpt_ma_if_0 = io_dis_uops_1_bits_xcpt_ma_if; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_bp_debug_if_0 = io_dis_uops_1_bits_bp_debug_if; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_bp_xcpt_if_0 = io_dis_uops_1_bits_bp_xcpt_if; // @[fp-pipeline.scala:28:7]
wire [1:0] io_dis_uops_1_bits_debug_fsrc_0 = io_dis_uops_1_bits_debug_fsrc; // @[fp-pipeline.scala:28:7]
wire [1:0] io_dis_uops_1_bits_debug_tsrc_0 = io_dis_uops_1_bits_debug_tsrc; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_valid_0 = io_dis_uops_2_valid; // @[fp-pipeline.scala:28:7]
wire [6:0] io_dis_uops_2_bits_uopc_0 = io_dis_uops_2_bits_uopc; // @[fp-pipeline.scala:28:7]
wire [31:0] io_dis_uops_2_bits_inst_0 = io_dis_uops_2_bits_inst; // @[fp-pipeline.scala:28:7]
wire [31:0] io_dis_uops_2_bits_debug_inst_0 = io_dis_uops_2_bits_debug_inst; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_is_rvc_0 = io_dis_uops_2_bits_is_rvc; // @[fp-pipeline.scala:28:7]
wire [39:0] io_dis_uops_2_bits_debug_pc_0 = io_dis_uops_2_bits_debug_pc; // @[fp-pipeline.scala:28:7]
wire [2:0] io_dis_uops_2_bits_iq_type_0 = io_dis_uops_2_bits_iq_type; // @[fp-pipeline.scala:28:7]
wire [9:0] io_dis_uops_2_bits_fu_code_0 = io_dis_uops_2_bits_fu_code; // @[fp-pipeline.scala:28:7]
wire [3:0] io_dis_uops_2_bits_ctrl_br_type_0 = io_dis_uops_2_bits_ctrl_br_type; // @[fp-pipeline.scala:28:7]
wire [1:0] io_dis_uops_2_bits_ctrl_op1_sel_0 = io_dis_uops_2_bits_ctrl_op1_sel; // @[fp-pipeline.scala:28:7]
wire [2:0] io_dis_uops_2_bits_ctrl_op2_sel_0 = io_dis_uops_2_bits_ctrl_op2_sel; // @[fp-pipeline.scala:28:7]
wire [2:0] io_dis_uops_2_bits_ctrl_imm_sel_0 = io_dis_uops_2_bits_ctrl_imm_sel; // @[fp-pipeline.scala:28:7]
wire [4:0] io_dis_uops_2_bits_ctrl_op_fcn_0 = io_dis_uops_2_bits_ctrl_op_fcn; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_ctrl_fcn_dw_0 = io_dis_uops_2_bits_ctrl_fcn_dw; // @[fp-pipeline.scala:28:7]
wire [2:0] io_dis_uops_2_bits_ctrl_csr_cmd_0 = io_dis_uops_2_bits_ctrl_csr_cmd; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_ctrl_is_load_0 = io_dis_uops_2_bits_ctrl_is_load; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_ctrl_is_sta_0 = io_dis_uops_2_bits_ctrl_is_sta; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_ctrl_is_std_0 = io_dis_uops_2_bits_ctrl_is_std; // @[fp-pipeline.scala:28:7]
wire [1:0] io_dis_uops_2_bits_iw_state_0 = io_dis_uops_2_bits_iw_state; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_iw_p1_poisoned_0 = io_dis_uops_2_bits_iw_p1_poisoned; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_iw_p2_poisoned_0 = io_dis_uops_2_bits_iw_p2_poisoned; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_is_br_0 = io_dis_uops_2_bits_is_br; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_is_jalr_0 = io_dis_uops_2_bits_is_jalr; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_is_jal_0 = io_dis_uops_2_bits_is_jal; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_is_sfb_0 = io_dis_uops_2_bits_is_sfb; // @[fp-pipeline.scala:28:7]
wire [15:0] io_dis_uops_2_bits_br_mask_0 = io_dis_uops_2_bits_br_mask; // @[fp-pipeline.scala:28:7]
wire [3:0] io_dis_uops_2_bits_br_tag_0 = io_dis_uops_2_bits_br_tag; // @[fp-pipeline.scala:28:7]
wire [4:0] io_dis_uops_2_bits_ftq_idx_0 = io_dis_uops_2_bits_ftq_idx; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_edge_inst_0 = io_dis_uops_2_bits_edge_inst; // @[fp-pipeline.scala:28:7]
wire [5:0] io_dis_uops_2_bits_pc_lob_0 = io_dis_uops_2_bits_pc_lob; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_taken_0 = io_dis_uops_2_bits_taken; // @[fp-pipeline.scala:28:7]
wire [19:0] io_dis_uops_2_bits_imm_packed_0 = io_dis_uops_2_bits_imm_packed; // @[fp-pipeline.scala:28:7]
wire [11:0] io_dis_uops_2_bits_csr_addr_0 = io_dis_uops_2_bits_csr_addr; // @[fp-pipeline.scala:28:7]
wire [6:0] io_dis_uops_2_bits_rob_idx_0 = io_dis_uops_2_bits_rob_idx; // @[fp-pipeline.scala:28:7]
wire [4:0] io_dis_uops_2_bits_ldq_idx_0 = io_dis_uops_2_bits_ldq_idx; // @[fp-pipeline.scala:28:7]
wire [4:0] io_dis_uops_2_bits_stq_idx_0 = io_dis_uops_2_bits_stq_idx; // @[fp-pipeline.scala:28:7]
wire [1:0] io_dis_uops_2_bits_rxq_idx_0 = io_dis_uops_2_bits_rxq_idx; // @[fp-pipeline.scala:28:7]
wire [6:0] io_dis_uops_2_bits_pdst_0 = io_dis_uops_2_bits_pdst; // @[fp-pipeline.scala:28:7]
wire [6:0] io_dis_uops_2_bits_prs1_0 = io_dis_uops_2_bits_prs1; // @[fp-pipeline.scala:28:7]
wire [6:0] io_dis_uops_2_bits_prs2_0 = io_dis_uops_2_bits_prs2; // @[fp-pipeline.scala:28:7]
wire [6:0] io_dis_uops_2_bits_prs3_0 = io_dis_uops_2_bits_prs3; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_prs1_busy_0 = io_dis_uops_2_bits_prs1_busy; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_prs2_busy_0 = io_dis_uops_2_bits_prs2_busy; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_prs3_busy_0 = io_dis_uops_2_bits_prs3_busy; // @[fp-pipeline.scala:28:7]
wire [6:0] io_dis_uops_2_bits_stale_pdst_0 = io_dis_uops_2_bits_stale_pdst; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_exception_0 = io_dis_uops_2_bits_exception; // @[fp-pipeline.scala:28:7]
wire [63:0] io_dis_uops_2_bits_exc_cause_0 = io_dis_uops_2_bits_exc_cause; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_bypassable_0 = io_dis_uops_2_bits_bypassable; // @[fp-pipeline.scala:28:7]
wire [4:0] io_dis_uops_2_bits_mem_cmd_0 = io_dis_uops_2_bits_mem_cmd; // @[fp-pipeline.scala:28:7]
wire [1:0] io_dis_uops_2_bits_mem_size_0 = io_dis_uops_2_bits_mem_size; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_mem_signed_0 = io_dis_uops_2_bits_mem_signed; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_is_fence_0 = io_dis_uops_2_bits_is_fence; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_is_fencei_0 = io_dis_uops_2_bits_is_fencei; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_is_amo_0 = io_dis_uops_2_bits_is_amo; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_uses_ldq_0 = io_dis_uops_2_bits_uses_ldq; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_uses_stq_0 = io_dis_uops_2_bits_uses_stq; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_is_sys_pc2epc_0 = io_dis_uops_2_bits_is_sys_pc2epc; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_is_unique_0 = io_dis_uops_2_bits_is_unique; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_flush_on_commit_0 = io_dis_uops_2_bits_flush_on_commit; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_ldst_is_rs1_0 = io_dis_uops_2_bits_ldst_is_rs1; // @[fp-pipeline.scala:28:7]
wire [5:0] io_dis_uops_2_bits_ldst_0 = io_dis_uops_2_bits_ldst; // @[fp-pipeline.scala:28:7]
wire [5:0] io_dis_uops_2_bits_lrs1_0 = io_dis_uops_2_bits_lrs1; // @[fp-pipeline.scala:28:7]
wire [5:0] io_dis_uops_2_bits_lrs2_0 = io_dis_uops_2_bits_lrs2; // @[fp-pipeline.scala:28:7]
wire [5:0] io_dis_uops_2_bits_lrs3_0 = io_dis_uops_2_bits_lrs3; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_ldst_val_0 = io_dis_uops_2_bits_ldst_val; // @[fp-pipeline.scala:28:7]
wire [1:0] io_dis_uops_2_bits_dst_rtype_0 = io_dis_uops_2_bits_dst_rtype; // @[fp-pipeline.scala:28:7]
wire [1:0] io_dis_uops_2_bits_lrs1_rtype_0 = io_dis_uops_2_bits_lrs1_rtype; // @[fp-pipeline.scala:28:7]
wire [1:0] io_dis_uops_2_bits_lrs2_rtype_0 = io_dis_uops_2_bits_lrs2_rtype; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_frs3_en_0 = io_dis_uops_2_bits_frs3_en; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_fp_val_0 = io_dis_uops_2_bits_fp_val; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_fp_single_0 = io_dis_uops_2_bits_fp_single; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_xcpt_pf_if_0 = io_dis_uops_2_bits_xcpt_pf_if; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_xcpt_ae_if_0 = io_dis_uops_2_bits_xcpt_ae_if; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_xcpt_ma_if_0 = io_dis_uops_2_bits_xcpt_ma_if; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_bp_debug_if_0 = io_dis_uops_2_bits_bp_debug_if; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_bp_xcpt_if_0 = io_dis_uops_2_bits_bp_xcpt_if; // @[fp-pipeline.scala:28:7]
wire [1:0] io_dis_uops_2_bits_debug_fsrc_0 = io_dis_uops_2_bits_debug_fsrc; // @[fp-pipeline.scala:28:7]
wire [1:0] io_dis_uops_2_bits_debug_tsrc_0 = io_dis_uops_2_bits_debug_tsrc; // @[fp-pipeline.scala:28:7]
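// Copies of the long-latency writeback port inputs (io_ll_wports_0)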
wire io_ll_wports_0_valid_0 = io_ll_wports_0_valid; // @[fp-pipeline.scala:28:7]
wire [6:0] io_ll_wports_0_bits_uop_uopc_0 = io_ll_wports_0_bits_uop_uopc; // @[fp-pipeline.scala:28:7]
wire [31:0] io_ll_wports_0_bits_uop_inst_0 = io_ll_wports_0_bits_uop_inst; // @[fp-pipeline.scala:28:7]
wire [31:0] io_ll_wports_0_bits_uop_debug_inst_0 = io_ll_wports_0_bits_uop_debug_inst; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_is_rvc_0 = io_ll_wports_0_bits_uop_is_rvc; // @[fp-pipeline.scala:28:7]
wire [39:0] io_ll_wports_0_bits_uop_debug_pc_0 = io_ll_wports_0_bits_uop_debug_pc; // @[fp-pipeline.scala:28:7]
wire [2:0] io_ll_wports_0_bits_uop_iq_type_0 = io_ll_wports_0_bits_uop_iq_type; // @[fp-pipeline.scala:28:7]
wire [9:0] io_ll_wports_0_bits_uop_fu_code_0 = io_ll_wports_0_bits_uop_fu_code; // @[fp-pipeline.scala:28:7]
wire [3:0] io_ll_wports_0_bits_uop_ctrl_br_type_0 = io_ll_wports_0_bits_uop_ctrl_br_type; // @[fp-pipeline.scala:28:7]
wire [1:0] io_ll_wports_0_bits_uop_ctrl_op1_sel_0 = io_ll_wports_0_bits_uop_ctrl_op1_sel; // @[fp-pipeline.scala:28:7]
wire [2:0] io_ll_wports_0_bits_uop_ctrl_op2_sel_0 = io_ll_wports_0_bits_uop_ctrl_op2_sel; // @[fp-pipeline.scala:28:7]
wire [2:0] io_ll_wports_0_bits_uop_ctrl_imm_sel_0 = io_ll_wports_0_bits_uop_ctrl_imm_sel; // @[fp-pipeline.scala:28:7]
wire [4:0] io_ll_wports_0_bits_uop_ctrl_op_fcn_0 = io_ll_wports_0_bits_uop_ctrl_op_fcn; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_ctrl_fcn_dw_0 = io_ll_wports_0_bits_uop_ctrl_fcn_dw; // @[fp-pipeline.scala:28:7]
wire [2:0] io_ll_wports_0_bits_uop_ctrl_csr_cmd_0 = io_ll_wports_0_bits_uop_ctrl_csr_cmd; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_ctrl_is_load_0 = io_ll_wports_0_bits_uop_ctrl_is_load; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_ctrl_is_sta_0 = io_ll_wports_0_bits_uop_ctrl_is_sta; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_ctrl_is_std_0 = io_ll_wports_0_bits_uop_ctrl_is_std; // @[fp-pipeline.scala:28:7]
wire [1:0] io_ll_wports_0_bits_uop_iw_state_0 = io_ll_wports_0_bits_uop_iw_state; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_iw_p1_poisoned_0 = io_ll_wports_0_bits_uop_iw_p1_poisoned; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_iw_p2_poisoned_0 = io_ll_wports_0_bits_uop_iw_p2_poisoned; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_is_br_0 = io_ll_wports_0_bits_uop_is_br; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_is_jalr_0 = io_ll_wports_0_bits_uop_is_jalr; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_is_jal_0 = io_ll_wports_0_bits_uop_is_jal; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_is_sfb_0 = io_ll_wports_0_bits_uop_is_sfb; // @[fp-pipeline.scala:28:7]
wire [15:0] io_ll_wports_0_bits_uop_br_mask_0 = io_ll_wports_0_bits_uop_br_mask; // @[fp-pipeline.scala:28:7]
wire [3:0] io_ll_wports_0_bits_uop_br_tag_0 = io_ll_wports_0_bits_uop_br_tag; // @[fp-pipeline.scala:28:7]
wire [4:0] io_ll_wports_0_bits_uop_ftq_idx_0 = io_ll_wports_0_bits_uop_ftq_idx; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_edge_inst_0 = io_ll_wports_0_bits_uop_edge_inst; // @[fp-pipeline.scala:28:7]
wire [5:0] io_ll_wports_0_bits_uop_pc_lob_0 = io_ll_wports_0_bits_uop_pc_lob; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_taken_0 = io_ll_wports_0_bits_uop_taken; // @[fp-pipeline.scala:28:7]
wire [19:0] io_ll_wports_0_bits_uop_imm_packed_0 = io_ll_wports_0_bits_uop_imm_packed; // @[fp-pipeline.scala:28:7]
wire [11:0] io_ll_wports_0_bits_uop_csr_addr_0 = io_ll_wports_0_bits_uop_csr_addr; // @[fp-pipeline.scala:28:7]
wire [6:0] io_ll_wports_0_bits_uop_rob_idx_0 = io_ll_wports_0_bits_uop_rob_idx; // @[fp-pipeline.scala:28:7]
wire [4:0] io_ll_wports_0_bits_uop_ldq_idx_0 = io_ll_wports_0_bits_uop_ldq_idx; // @[fp-pipeline.scala:28:7]
wire [4:0] io_ll_wports_0_bits_uop_stq_idx_0 = io_ll_wports_0_bits_uop_stq_idx; // @[fp-pipeline.scala:28:7]
wire [1:0] io_ll_wports_0_bits_uop_rxq_idx_0 = io_ll_wports_0_bits_uop_rxq_idx; // @[fp-pipeline.scala:28:7]
wire [6:0] io_ll_wports_0_bits_uop_pdst_0 = io_ll_wports_0_bits_uop_pdst; // @[fp-pipeline.scala:28:7]
wire [6:0] io_ll_wports_0_bits_uop_prs1_0 = io_ll_wports_0_bits_uop_prs1; // @[fp-pipeline.scala:28:7]
wire [6:0] io_ll_wports_0_bits_uop_prs2_0 = io_ll_wports_0_bits_uop_prs2; // @[fp-pipeline.scala:28:7]
wire [6:0] io_ll_wports_0_bits_uop_prs3_0 = io_ll_wports_0_bits_uop_prs3; // @[fp-pipeline.scala:28:7]
wire [4:0] io_ll_wports_0_bits_uop_ppred_0 = io_ll_wports_0_bits_uop_ppred; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_prs1_busy_0 = io_ll_wports_0_bits_uop_prs1_busy; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_prs2_busy_0 = io_ll_wports_0_bits_uop_prs2_busy; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_prs3_busy_0 = io_ll_wports_0_bits_uop_prs3_busy; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_ppred_busy_0 = io_ll_wports_0_bits_uop_ppred_busy; // @[fp-pipeline.scala:28:7]
wire [6:0] io_ll_wports_0_bits_uop_stale_pdst_0 = io_ll_wports_0_bits_uop_stale_pdst; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_exception_0 = io_ll_wports_0_bits_uop_exception; // @[fp-pipeline.scala:28:7]
wire [63:0] io_ll_wports_0_bits_uop_exc_cause_0 = io_ll_wports_0_bits_uop_exc_cause; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_bypassable_0 = io_ll_wports_0_bits_uop_bypassable; // @[fp-pipeline.scala:28:7]
wire [4:0] io_ll_wports_0_bits_uop_mem_cmd_0 = io_ll_wports_0_bits_uop_mem_cmd; // @[fp-pipeline.scala:28:7]
wire [1:0] io_ll_wports_0_bits_uop_mem_size_0 = io_ll_wports_0_bits_uop_mem_size; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_mem_signed_0 = io_ll_wports_0_bits_uop_mem_signed; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_is_fence_0 = io_ll_wports_0_bits_uop_is_fence; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_is_fencei_0 = io_ll_wports_0_bits_uop_is_fencei; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_is_amo_0 = io_ll_wports_0_bits_uop_is_amo; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_uses_ldq_0 = io_ll_wports_0_bits_uop_uses_ldq; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_uses_stq_0 = io_ll_wports_0_bits_uop_uses_stq; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_is_sys_pc2epc_0 = io_ll_wports_0_bits_uop_is_sys_pc2epc; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_is_unique_0 = io_ll_wports_0_bits_uop_is_unique; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_flush_on_commit_0 = io_ll_wports_0_bits_uop_flush_on_commit; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_ldst_is_rs1_0 = io_ll_wports_0_bits_uop_ldst_is_rs1; // @[fp-pipeline.scala:28:7]
wire [5:0] io_ll_wports_0_bits_uop_ldst_0 = io_ll_wports_0_bits_uop_ldst; // @[fp-pipeline.scala:28:7]
wire [5:0] io_ll_wports_0_bits_uop_lrs1_0 = io_ll_wports_0_bits_uop_lrs1; // @[fp-pipeline.scala:28:7]
wire [5:0] io_ll_wports_0_bits_uop_lrs2_0 = io_ll_wports_0_bits_uop_lrs2; // @[fp-pipeline.scala:28:7]
wire [5:0] io_ll_wports_0_bits_uop_lrs3_0 = io_ll_wports_0_bits_uop_lrs3; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_ldst_val_0 = io_ll_wports_0_bits_uop_ldst_val; // @[fp-pipeline.scala:28:7]
wire [1:0] io_ll_wports_0_bits_uop_dst_rtype_0 = io_ll_wports_0_bits_uop_dst_rtype; // @[fp-pipeline.scala:28:7]
wire [1:0] io_ll_wports_0_bits_uop_lrs1_rtype_0 = io_ll_wports_0_bits_uop_lrs1_rtype; // @[fp-pipeline.scala:28:7]
wire [1:0] io_ll_wports_0_bits_uop_lrs2_rtype_0 = io_ll_wports_0_bits_uop_lrs2_rtype; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_frs3_en_0 = io_ll_wports_0_bits_uop_frs3_en; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_fp_val_0 = io_ll_wports_0_bits_uop_fp_val; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_fp_single_0 = io_ll_wports_0_bits_uop_fp_single; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_xcpt_pf_if_0 = io_ll_wports_0_bits_uop_xcpt_pf_if; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_xcpt_ae_if_0 = io_ll_wports_0_bits_uop_xcpt_ae_if; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_xcpt_ma_if_0 = io_ll_wports_0_bits_uop_xcpt_ma_if; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_bp_debug_if_0 = io_ll_wports_0_bits_uop_bp_debug_if; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_uop_bp_xcpt_if_0 = io_ll_wports_0_bits_uop_bp_xcpt_if; // @[fp-pipeline.scala:28:7]
wire [1:0] io_ll_wports_0_bits_uop_debug_fsrc_0 = io_ll_wports_0_bits_uop_debug_fsrc; // @[fp-pipeline.scala:28:7]
wire [1:0] io_ll_wports_0_bits_uop_debug_tsrc_0 = io_ll_wports_0_bits_uop_debug_tsrc; // @[fp-pipeline.scala:28:7]
wire [64:0] io_ll_wports_0_bits_data_0 = io_ll_wports_0_bits_data; // @[fp-pipeline.scala:28:7]
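  // Local aliases of the io_from_int input port fields (writeback arriving from the integer side, including its fflags payload).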
wire io_from_int_valid_0 = io_from_int_valid; // @[fp-pipeline.scala:28:7]
wire [6:0] io_from_int_bits_uop_uopc_0 = io_from_int_bits_uop_uopc; // @[fp-pipeline.scala:28:7]
wire [31:0] io_from_int_bits_uop_inst_0 = io_from_int_bits_uop_inst; // @[fp-pipeline.scala:28:7]
wire [31:0] io_from_int_bits_uop_debug_inst_0 = io_from_int_bits_uop_debug_inst; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_is_rvc_0 = io_from_int_bits_uop_is_rvc; // @[fp-pipeline.scala:28:7]
wire [39:0] io_from_int_bits_uop_debug_pc_0 = io_from_int_bits_uop_debug_pc; // @[fp-pipeline.scala:28:7]
wire [2:0] io_from_int_bits_uop_iq_type_0 = io_from_int_bits_uop_iq_type; // @[fp-pipeline.scala:28:7]
wire [9:0] io_from_int_bits_uop_fu_code_0 = io_from_int_bits_uop_fu_code; // @[fp-pipeline.scala:28:7]
wire [3:0] io_from_int_bits_uop_ctrl_br_type_0 = io_from_int_bits_uop_ctrl_br_type; // @[fp-pipeline.scala:28:7]
wire [1:0] io_from_int_bits_uop_ctrl_op1_sel_0 = io_from_int_bits_uop_ctrl_op1_sel; // @[fp-pipeline.scala:28:7]
wire [2:0] io_from_int_bits_uop_ctrl_op2_sel_0 = io_from_int_bits_uop_ctrl_op2_sel; // @[fp-pipeline.scala:28:7]
wire [2:0] io_from_int_bits_uop_ctrl_imm_sel_0 = io_from_int_bits_uop_ctrl_imm_sel; // @[fp-pipeline.scala:28:7]
wire [4:0] io_from_int_bits_uop_ctrl_op_fcn_0 = io_from_int_bits_uop_ctrl_op_fcn; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_ctrl_fcn_dw_0 = io_from_int_bits_uop_ctrl_fcn_dw; // @[fp-pipeline.scala:28:7]
wire [2:0] io_from_int_bits_uop_ctrl_csr_cmd_0 = io_from_int_bits_uop_ctrl_csr_cmd; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_ctrl_is_load_0 = io_from_int_bits_uop_ctrl_is_load; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_ctrl_is_sta_0 = io_from_int_bits_uop_ctrl_is_sta; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_ctrl_is_std_0 = io_from_int_bits_uop_ctrl_is_std; // @[fp-pipeline.scala:28:7]
wire [1:0] io_from_int_bits_uop_iw_state_0 = io_from_int_bits_uop_iw_state; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_iw_p1_poisoned_0 = io_from_int_bits_uop_iw_p1_poisoned; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_iw_p2_poisoned_0 = io_from_int_bits_uop_iw_p2_poisoned; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_is_br_0 = io_from_int_bits_uop_is_br; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_is_jalr_0 = io_from_int_bits_uop_is_jalr; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_is_jal_0 = io_from_int_bits_uop_is_jal; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_is_sfb_0 = io_from_int_bits_uop_is_sfb; // @[fp-pipeline.scala:28:7]
wire [15:0] io_from_int_bits_uop_br_mask_0 = io_from_int_bits_uop_br_mask; // @[fp-pipeline.scala:28:7]
wire [3:0] io_from_int_bits_uop_br_tag_0 = io_from_int_bits_uop_br_tag; // @[fp-pipeline.scala:28:7]
wire [4:0] io_from_int_bits_uop_ftq_idx_0 = io_from_int_bits_uop_ftq_idx; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_edge_inst_0 = io_from_int_bits_uop_edge_inst; // @[fp-pipeline.scala:28:7]
wire [5:0] io_from_int_bits_uop_pc_lob_0 = io_from_int_bits_uop_pc_lob; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_taken_0 = io_from_int_bits_uop_taken; // @[fp-pipeline.scala:28:7]
wire [19:0] io_from_int_bits_uop_imm_packed_0 = io_from_int_bits_uop_imm_packed; // @[fp-pipeline.scala:28:7]
wire [11:0] io_from_int_bits_uop_csr_addr_0 = io_from_int_bits_uop_csr_addr; // @[fp-pipeline.scala:28:7]
wire [6:0] io_from_int_bits_uop_rob_idx_0 = io_from_int_bits_uop_rob_idx; // @[fp-pipeline.scala:28:7]
wire [4:0] io_from_int_bits_uop_ldq_idx_0 = io_from_int_bits_uop_ldq_idx; // @[fp-pipeline.scala:28:7]
wire [4:0] io_from_int_bits_uop_stq_idx_0 = io_from_int_bits_uop_stq_idx; // @[fp-pipeline.scala:28:7]
wire [1:0] io_from_int_bits_uop_rxq_idx_0 = io_from_int_bits_uop_rxq_idx; // @[fp-pipeline.scala:28:7]
wire [6:0] io_from_int_bits_uop_pdst_0 = io_from_int_bits_uop_pdst; // @[fp-pipeline.scala:28:7]
wire [6:0] io_from_int_bits_uop_prs1_0 = io_from_int_bits_uop_prs1; // @[fp-pipeline.scala:28:7]
wire [6:0] io_from_int_bits_uop_prs2_0 = io_from_int_bits_uop_prs2; // @[fp-pipeline.scala:28:7]
wire [6:0] io_from_int_bits_uop_prs3_0 = io_from_int_bits_uop_prs3; // @[fp-pipeline.scala:28:7]
wire [4:0] io_from_int_bits_uop_ppred_0 = io_from_int_bits_uop_ppred; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_prs1_busy_0 = io_from_int_bits_uop_prs1_busy; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_prs2_busy_0 = io_from_int_bits_uop_prs2_busy; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_prs3_busy_0 = io_from_int_bits_uop_prs3_busy; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_ppred_busy_0 = io_from_int_bits_uop_ppred_busy; // @[fp-pipeline.scala:28:7]
wire [6:0] io_from_int_bits_uop_stale_pdst_0 = io_from_int_bits_uop_stale_pdst; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_exception_0 = io_from_int_bits_uop_exception; // @[fp-pipeline.scala:28:7]
wire [63:0] io_from_int_bits_uop_exc_cause_0 = io_from_int_bits_uop_exc_cause; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_bypassable_0 = io_from_int_bits_uop_bypassable; // @[fp-pipeline.scala:28:7]
wire [4:0] io_from_int_bits_uop_mem_cmd_0 = io_from_int_bits_uop_mem_cmd; // @[fp-pipeline.scala:28:7]
wire [1:0] io_from_int_bits_uop_mem_size_0 = io_from_int_bits_uop_mem_size; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_mem_signed_0 = io_from_int_bits_uop_mem_signed; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_is_fence_0 = io_from_int_bits_uop_is_fence; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_is_fencei_0 = io_from_int_bits_uop_is_fencei; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_is_amo_0 = io_from_int_bits_uop_is_amo; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_uses_ldq_0 = io_from_int_bits_uop_uses_ldq; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_uses_stq_0 = io_from_int_bits_uop_uses_stq; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_is_sys_pc2epc_0 = io_from_int_bits_uop_is_sys_pc2epc; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_is_unique_0 = io_from_int_bits_uop_is_unique; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_flush_on_commit_0 = io_from_int_bits_uop_flush_on_commit; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_ldst_is_rs1_0 = io_from_int_bits_uop_ldst_is_rs1; // @[fp-pipeline.scala:28:7]
wire [5:0] io_from_int_bits_uop_ldst_0 = io_from_int_bits_uop_ldst; // @[fp-pipeline.scala:28:7]
wire [5:0] io_from_int_bits_uop_lrs1_0 = io_from_int_bits_uop_lrs1; // @[fp-pipeline.scala:28:7]
wire [5:0] io_from_int_bits_uop_lrs2_0 = io_from_int_bits_uop_lrs2; // @[fp-pipeline.scala:28:7]
wire [5:0] io_from_int_bits_uop_lrs3_0 = io_from_int_bits_uop_lrs3; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_ldst_val_0 = io_from_int_bits_uop_ldst_val; // @[fp-pipeline.scala:28:7]
wire [1:0] io_from_int_bits_uop_dst_rtype_0 = io_from_int_bits_uop_dst_rtype; // @[fp-pipeline.scala:28:7]
wire [1:0] io_from_int_bits_uop_lrs1_rtype_0 = io_from_int_bits_uop_lrs1_rtype; // @[fp-pipeline.scala:28:7]
wire [1:0] io_from_int_bits_uop_lrs2_rtype_0 = io_from_int_bits_uop_lrs2_rtype; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_frs3_en_0 = io_from_int_bits_uop_frs3_en; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_fp_val_0 = io_from_int_bits_uop_fp_val; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_fp_single_0 = io_from_int_bits_uop_fp_single; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_xcpt_pf_if_0 = io_from_int_bits_uop_xcpt_pf_if; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_xcpt_ae_if_0 = io_from_int_bits_uop_xcpt_ae_if; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_xcpt_ma_if_0 = io_from_int_bits_uop_xcpt_ma_if; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_bp_debug_if_0 = io_from_int_bits_uop_bp_debug_if; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_uop_bp_xcpt_if_0 = io_from_int_bits_uop_bp_xcpt_if; // @[fp-pipeline.scala:28:7]
wire [1:0] io_from_int_bits_uop_debug_fsrc_0 = io_from_int_bits_uop_debug_fsrc; // @[fp-pipeline.scala:28:7]
wire [1:0] io_from_int_bits_uop_debug_tsrc_0 = io_from_int_bits_uop_debug_tsrc; // @[fp-pipeline.scala:28:7]
wire [64:0] io_from_int_bits_data_0 = io_from_int_bits_data; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_predicated_0 = io_from_int_bits_predicated; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_valid_0 = io_from_int_bits_fflags_valid; // @[fp-pipeline.scala:28:7]
wire [6:0] io_from_int_bits_fflags_bits_uop_uopc_0 = io_from_int_bits_fflags_bits_uop_uopc; // @[fp-pipeline.scala:28:7]
wire [31:0] io_from_int_bits_fflags_bits_uop_inst_0 = io_from_int_bits_fflags_bits_uop_inst; // @[fp-pipeline.scala:28:7]
wire [31:0] io_from_int_bits_fflags_bits_uop_debug_inst_0 = io_from_int_bits_fflags_bits_uop_debug_inst; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_is_rvc_0 = io_from_int_bits_fflags_bits_uop_is_rvc; // @[fp-pipeline.scala:28:7]
wire [39:0] io_from_int_bits_fflags_bits_uop_debug_pc_0 = io_from_int_bits_fflags_bits_uop_debug_pc; // @[fp-pipeline.scala:28:7]
wire [2:0] io_from_int_bits_fflags_bits_uop_iq_type_0 = io_from_int_bits_fflags_bits_uop_iq_type; // @[fp-pipeline.scala:28:7]
wire [9:0] io_from_int_bits_fflags_bits_uop_fu_code_0 = io_from_int_bits_fflags_bits_uop_fu_code; // @[fp-pipeline.scala:28:7]
wire [3:0] io_from_int_bits_fflags_bits_uop_ctrl_br_type_0 = io_from_int_bits_fflags_bits_uop_ctrl_br_type; // @[fp-pipeline.scala:28:7]
wire [1:0] io_from_int_bits_fflags_bits_uop_ctrl_op1_sel_0 = io_from_int_bits_fflags_bits_uop_ctrl_op1_sel; // @[fp-pipeline.scala:28:7]
wire [2:0] io_from_int_bits_fflags_bits_uop_ctrl_op2_sel_0 = io_from_int_bits_fflags_bits_uop_ctrl_op2_sel; // @[fp-pipeline.scala:28:7]
wire [2:0] io_from_int_bits_fflags_bits_uop_ctrl_imm_sel_0 = io_from_int_bits_fflags_bits_uop_ctrl_imm_sel; // @[fp-pipeline.scala:28:7]
wire [4:0] io_from_int_bits_fflags_bits_uop_ctrl_op_fcn_0 = io_from_int_bits_fflags_bits_uop_ctrl_op_fcn; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_ctrl_fcn_dw_0 = io_from_int_bits_fflags_bits_uop_ctrl_fcn_dw; // @[fp-pipeline.scala:28:7]
wire [2:0] io_from_int_bits_fflags_bits_uop_ctrl_csr_cmd_0 = io_from_int_bits_fflags_bits_uop_ctrl_csr_cmd; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_ctrl_is_load_0 = io_from_int_bits_fflags_bits_uop_ctrl_is_load; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_ctrl_is_sta_0 = io_from_int_bits_fflags_bits_uop_ctrl_is_sta; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_ctrl_is_std_0 = io_from_int_bits_fflags_bits_uop_ctrl_is_std; // @[fp-pipeline.scala:28:7]
wire [1:0] io_from_int_bits_fflags_bits_uop_iw_state_0 = io_from_int_bits_fflags_bits_uop_iw_state; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_iw_p1_poisoned_0 = io_from_int_bits_fflags_bits_uop_iw_p1_poisoned; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_iw_p2_poisoned_0 = io_from_int_bits_fflags_bits_uop_iw_p2_poisoned; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_is_br_0 = io_from_int_bits_fflags_bits_uop_is_br; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_is_jalr_0 = io_from_int_bits_fflags_bits_uop_is_jalr; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_is_jal_0 = io_from_int_bits_fflags_bits_uop_is_jal; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_is_sfb_0 = io_from_int_bits_fflags_bits_uop_is_sfb; // @[fp-pipeline.scala:28:7]
wire [15:0] io_from_int_bits_fflags_bits_uop_br_mask_0 = io_from_int_bits_fflags_bits_uop_br_mask; // @[fp-pipeline.scala:28:7]
wire [3:0] io_from_int_bits_fflags_bits_uop_br_tag_0 = io_from_int_bits_fflags_bits_uop_br_tag; // @[fp-pipeline.scala:28:7]
wire [4:0] io_from_int_bits_fflags_bits_uop_ftq_idx_0 = io_from_int_bits_fflags_bits_uop_ftq_idx; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_edge_inst_0 = io_from_int_bits_fflags_bits_uop_edge_inst; // @[fp-pipeline.scala:28:7]
wire [5:0] io_from_int_bits_fflags_bits_uop_pc_lob_0 = io_from_int_bits_fflags_bits_uop_pc_lob; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_taken_0 = io_from_int_bits_fflags_bits_uop_taken; // @[fp-pipeline.scala:28:7]
wire [19:0] io_from_int_bits_fflags_bits_uop_imm_packed_0 = io_from_int_bits_fflags_bits_uop_imm_packed; // @[fp-pipeline.scala:28:7]
wire [11:0] io_from_int_bits_fflags_bits_uop_csr_addr_0 = io_from_int_bits_fflags_bits_uop_csr_addr; // @[fp-pipeline.scala:28:7]
wire [6:0] io_from_int_bits_fflags_bits_uop_rob_idx_0 = io_from_int_bits_fflags_bits_uop_rob_idx; // @[fp-pipeline.scala:28:7]
wire [4:0] io_from_int_bits_fflags_bits_uop_ldq_idx_0 = io_from_int_bits_fflags_bits_uop_ldq_idx; // @[fp-pipeline.scala:28:7]
wire [4:0] io_from_int_bits_fflags_bits_uop_stq_idx_0 = io_from_int_bits_fflags_bits_uop_stq_idx; // @[fp-pipeline.scala:28:7]
wire [1:0] io_from_int_bits_fflags_bits_uop_rxq_idx_0 = io_from_int_bits_fflags_bits_uop_rxq_idx; // @[fp-pipeline.scala:28:7]
wire [6:0] io_from_int_bits_fflags_bits_uop_pdst_0 = io_from_int_bits_fflags_bits_uop_pdst; // @[fp-pipeline.scala:28:7]
wire [6:0] io_from_int_bits_fflags_bits_uop_prs1_0 = io_from_int_bits_fflags_bits_uop_prs1; // @[fp-pipeline.scala:28:7]
wire [6:0] io_from_int_bits_fflags_bits_uop_prs2_0 = io_from_int_bits_fflags_bits_uop_prs2; // @[fp-pipeline.scala:28:7]
wire [6:0] io_from_int_bits_fflags_bits_uop_prs3_0 = io_from_int_bits_fflags_bits_uop_prs3; // @[fp-pipeline.scala:28:7]
wire [4:0] io_from_int_bits_fflags_bits_uop_ppred_0 = io_from_int_bits_fflags_bits_uop_ppred; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_prs1_busy_0 = io_from_int_bits_fflags_bits_uop_prs1_busy; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_prs2_busy_0 = io_from_int_bits_fflags_bits_uop_prs2_busy; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_prs3_busy_0 = io_from_int_bits_fflags_bits_uop_prs3_busy; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_ppred_busy_0 = io_from_int_bits_fflags_bits_uop_ppred_busy; // @[fp-pipeline.scala:28:7]
wire [6:0] io_from_int_bits_fflags_bits_uop_stale_pdst_0 = io_from_int_bits_fflags_bits_uop_stale_pdst; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_exception_0 = io_from_int_bits_fflags_bits_uop_exception; // @[fp-pipeline.scala:28:7]
wire [63:0] io_from_int_bits_fflags_bits_uop_exc_cause_0 = io_from_int_bits_fflags_bits_uop_exc_cause; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_bypassable_0 = io_from_int_bits_fflags_bits_uop_bypassable; // @[fp-pipeline.scala:28:7]
wire [4:0] io_from_int_bits_fflags_bits_uop_mem_cmd_0 = io_from_int_bits_fflags_bits_uop_mem_cmd; // @[fp-pipeline.scala:28:7]
wire [1:0] io_from_int_bits_fflags_bits_uop_mem_size_0 = io_from_int_bits_fflags_bits_uop_mem_size; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_mem_signed_0 = io_from_int_bits_fflags_bits_uop_mem_signed; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_is_fence_0 = io_from_int_bits_fflags_bits_uop_is_fence; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_is_fencei_0 = io_from_int_bits_fflags_bits_uop_is_fencei; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_is_amo_0 = io_from_int_bits_fflags_bits_uop_is_amo; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_uses_ldq_0 = io_from_int_bits_fflags_bits_uop_uses_ldq; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_uses_stq_0 = io_from_int_bits_fflags_bits_uop_uses_stq; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_is_sys_pc2epc_0 = io_from_int_bits_fflags_bits_uop_is_sys_pc2epc; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_is_unique_0 = io_from_int_bits_fflags_bits_uop_is_unique; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_flush_on_commit_0 = io_from_int_bits_fflags_bits_uop_flush_on_commit; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_ldst_is_rs1_0 = io_from_int_bits_fflags_bits_uop_ldst_is_rs1; // @[fp-pipeline.scala:28:7]
wire [5:0] io_from_int_bits_fflags_bits_uop_ldst_0 = io_from_int_bits_fflags_bits_uop_ldst; // @[fp-pipeline.scala:28:7]
wire [5:0] io_from_int_bits_fflags_bits_uop_lrs1_0 = io_from_int_bits_fflags_bits_uop_lrs1; // @[fp-pipeline.scala:28:7]
wire [5:0] io_from_int_bits_fflags_bits_uop_lrs2_0 = io_from_int_bits_fflags_bits_uop_lrs2; // @[fp-pipeline.scala:28:7]
wire [5:0] io_from_int_bits_fflags_bits_uop_lrs3_0 = io_from_int_bits_fflags_bits_uop_lrs3; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_ldst_val_0 = io_from_int_bits_fflags_bits_uop_ldst_val; // @[fp-pipeline.scala:28:7]
wire [1:0] io_from_int_bits_fflags_bits_uop_dst_rtype_0 = io_from_int_bits_fflags_bits_uop_dst_rtype; // @[fp-pipeline.scala:28:7]
wire [1:0] io_from_int_bits_fflags_bits_uop_lrs1_rtype_0 = io_from_int_bits_fflags_bits_uop_lrs1_rtype; // @[fp-pipeline.scala:28:7]
wire [1:0] io_from_int_bits_fflags_bits_uop_lrs2_rtype_0 = io_from_int_bits_fflags_bits_uop_lrs2_rtype; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_frs3_en_0 = io_from_int_bits_fflags_bits_uop_frs3_en; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_fp_val_0 = io_from_int_bits_fflags_bits_uop_fp_val; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_fp_single_0 = io_from_int_bits_fflags_bits_uop_fp_single; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_xcpt_pf_if_0 = io_from_int_bits_fflags_bits_uop_xcpt_pf_if; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_xcpt_ae_if_0 = io_from_int_bits_fflags_bits_uop_xcpt_ae_if; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_xcpt_ma_if_0 = io_from_int_bits_fflags_bits_uop_xcpt_ma_if; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_bp_debug_if_0 = io_from_int_bits_fflags_bits_uop_bp_debug_if; // @[fp-pipeline.scala:28:7]
wire io_from_int_bits_fflags_bits_uop_bp_xcpt_if_0 = io_from_int_bits_fflags_bits_uop_bp_xcpt_if; // @[fp-pipeline.scala:28:7]
wire [1:0] io_from_int_bits_fflags_bits_uop_debug_fsrc_0 = io_from_int_bits_fflags_bits_uop_debug_fsrc; // @[fp-pipeline.scala:28:7]
wire [1:0] io_from_int_bits_fflags_bits_uop_debug_tsrc_0 = io_from_int_bits_fflags_bits_uop_debug_tsrc; // @[fp-pipeline.scala:28:7]
wire [4:0] io_from_int_bits_fflags_bits_flags_0 = io_from_int_bits_fflags_bits_flags; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_ready_0 = io_to_sdq_ready; // @[fp-pipeline.scala:28:7]
wire io_to_int_ready_0 = io_to_int_ready; // @[fp-pipeline.scala:28:7]
wire [63:0] io_debug_tsc_reg_0 = io_debug_tsc_reg; // @[fp-pipeline.scala:28:7]
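  // Constant-valued signals: io_status fields fixed by the configuration, plus tied-off unused inputs (dispatch ppred, io_ll_wports_0 predicated/fflags, io_wb_* ports).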
wire [31:0] io_status_isa = 32'h14112D; // @[fp-pipeline.scala:28:7]
wire [22:0] io_status_zero2 = 23'h0; // @[fp-pipeline.scala:28:7]
wire io_status_mbe = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_status_sbe = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_status_sd_rv32 = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_status_ube = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_status_upie = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_status_hie = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_status_uie = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_0_bits_ppred_busy = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_bits_ppred_busy = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_bits_ppred_busy = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_predicated = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_valid = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_is_rvc = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_ctrl_fcn_dw = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_ctrl_is_load = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_ctrl_is_sta = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_ctrl_is_std = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_iw_p1_poisoned = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_iw_p2_poisoned = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_is_br = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_is_jalr = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_is_jal = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_is_sfb = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_edge_inst = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_taken = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_prs1_busy = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_prs2_busy = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_prs3_busy = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_ppred_busy = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_exception = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_bypassable = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_mem_signed = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_is_fence = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_is_fencei = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_is_amo = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_uses_ldq = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_uses_stq = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_is_sys_pc2epc = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_is_unique = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_flush_on_commit = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_ldst_is_rs1 = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_ldst_val = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_frs3_en = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_fp_val = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_fp_single = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_xcpt_pf_if = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_xcpt_ae_if = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_xcpt_ma_if = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_bp_debug_if = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_bits_fflags_bits_uop_bp_xcpt_if = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_predicated = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_wb_valids_0 = 1'h0; // @[fp-pipeline.scala:28:7]
wire io_wb_valids_1 = 1'h0; // @[fp-pipeline.scala:28:7]
wire iss_uops_0_iw_p1_poisoned = 1'h0; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_iw_p2_poisoned = 1'h0; // @[fp-pipeline.scala:89:24]
wire [7:0] io_status_zero1 = 8'h0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_status_xs = 2'h0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_status_vs = 2'h0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_ll_wports_0_bits_fflags_bits_uop_ctrl_op1_sel = 2'h0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_ll_wports_0_bits_fflags_bits_uop_iw_state = 2'h0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_ll_wports_0_bits_fflags_bits_uop_rxq_idx = 2'h0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_ll_wports_0_bits_fflags_bits_uop_mem_size = 2'h0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_ll_wports_0_bits_fflags_bits_uop_dst_rtype = 2'h0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_ll_wports_0_bits_fflags_bits_uop_lrs1_rtype = 2'h0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_ll_wports_0_bits_fflags_bits_uop_lrs2_rtype = 2'h0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_ll_wports_0_bits_fflags_bits_uop_debug_fsrc = 2'h0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_ll_wports_0_bits_fflags_bits_uop_debug_tsrc = 2'h0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_status_sxl = 2'h2; // @[fp-pipeline.scala:28:7]
wire [1:0] io_status_uxl = 2'h2; // @[fp-pipeline.scala:28:7]
wire [4:0] io_dis_uops_0_bits_ppred = 5'h0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_dis_uops_1_bits_ppred = 5'h0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_dis_uops_2_bits_ppred = 5'h0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_ll_wports_0_bits_fflags_bits_uop_ctrl_op_fcn = 5'h0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_ll_wports_0_bits_fflags_bits_uop_ftq_idx = 5'h0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_ll_wports_0_bits_fflags_bits_uop_ldq_idx = 5'h0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_ll_wports_0_bits_fflags_bits_uop_stq_idx = 5'h0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_ll_wports_0_bits_fflags_bits_uop_ppred = 5'h0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_ll_wports_0_bits_fflags_bits_uop_mem_cmd = 5'h0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_ll_wports_0_bits_fflags_bits_flags = 5'h0; // @[fp-pipeline.scala:28:7]
wire io_ll_wports_0_ready = 1'h1; // @[fp-pipeline.scala:28:7]
wire [6:0] io_ll_wports_0_bits_fflags_bits_uop_uopc = 7'h0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_ll_wports_0_bits_fflags_bits_uop_rob_idx = 7'h0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_ll_wports_0_bits_fflags_bits_uop_pdst = 7'h0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_ll_wports_0_bits_fflags_bits_uop_prs1 = 7'h0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_ll_wports_0_bits_fflags_bits_uop_prs2 = 7'h0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_ll_wports_0_bits_fflags_bits_uop_prs3 = 7'h0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_ll_wports_0_bits_fflags_bits_uop_stale_pdst = 7'h0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_wb_pdsts_0 = 7'h0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_wb_pdsts_1 = 7'h0; // @[fp-pipeline.scala:28:7]
wire [31:0] io_ll_wports_0_bits_fflags_bits_uop_inst = 32'h0; // @[fp-pipeline.scala:28:7]
wire [31:0] io_ll_wports_0_bits_fflags_bits_uop_debug_inst = 32'h0; // @[fp-pipeline.scala:28:7]
wire [39:0] io_ll_wports_0_bits_fflags_bits_uop_debug_pc = 40'h0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_ll_wports_0_bits_fflags_bits_uop_iq_type = 3'h0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_ll_wports_0_bits_fflags_bits_uop_ctrl_op2_sel = 3'h0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_ll_wports_0_bits_fflags_bits_uop_ctrl_imm_sel = 3'h0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_ll_wports_0_bits_fflags_bits_uop_ctrl_csr_cmd = 3'h0; // @[fp-pipeline.scala:28:7]
wire [9:0] io_ll_wports_0_bits_fflags_bits_uop_fu_code = 10'h0; // @[fp-pipeline.scala:28:7]
wire [3:0] io_ll_wports_0_bits_fflags_bits_uop_ctrl_br_type = 4'h0; // @[fp-pipeline.scala:28:7]
wire [3:0] io_ll_wports_0_bits_fflags_bits_uop_br_tag = 4'h0; // @[fp-pipeline.scala:28:7]
wire [15:0] io_ll_wports_0_bits_fflags_bits_uop_br_mask = 16'h0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_ll_wports_0_bits_fflags_bits_uop_pc_lob = 6'h0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_ll_wports_0_bits_fflags_bits_uop_ldst = 6'h0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_ll_wports_0_bits_fflags_bits_uop_lrs1 = 6'h0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_ll_wports_0_bits_fflags_bits_uop_lrs2 = 6'h0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_ll_wports_0_bits_fflags_bits_uop_lrs3 = 6'h0; // @[fp-pipeline.scala:28:7]
wire [19:0] io_ll_wports_0_bits_fflags_bits_uop_imm_packed = 20'h0; // @[fp-pipeline.scala:28:7]
wire [11:0] io_ll_wports_0_bits_fflags_bits_uop_csr_addr = 12'h0; // @[fp-pipeline.scala:28:7]
wire [63:0] io_ll_wports_0_bits_fflags_bits_uop_exc_cause = 64'h0; // @[fp-pipeline.scala:28:7]
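  // Internal control signals declared here and assigned later in the module body.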
wire _io_to_sdq_valid_T_1; // @[fp-pipeline.scala:216:49]
wire _io_to_int_valid_T_2; // @[fp-pipeline.scala:215:49]
wire _io_wakeups_1_valid_T_1; // @[fp-pipeline.scala:243:37]
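  // Output port values, declared here and driven by the logic that follows.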
wire io_dis_uops_0_ready_0; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_1_ready_0; // @[fp-pipeline.scala:28:7]
wire io_dis_uops_2_ready_0; // @[fp-pipeline.scala:28:7]
wire io_from_int_ready_0; // @[fp-pipeline.scala:28:7]
wire [3:0] io_to_sdq_bits_uop_ctrl_br_type_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_sdq_bits_uop_ctrl_op1_sel_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_to_sdq_bits_uop_ctrl_op2_sel_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_to_sdq_bits_uop_ctrl_imm_sel_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_to_sdq_bits_uop_ctrl_op_fcn_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_ctrl_fcn_dw_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_to_sdq_bits_uop_ctrl_csr_cmd_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_ctrl_is_load_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_ctrl_is_sta_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_ctrl_is_std_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_to_sdq_bits_uop_uopc_0; // @[fp-pipeline.scala:28:7]
wire [31:0] io_to_sdq_bits_uop_inst_0; // @[fp-pipeline.scala:28:7]
wire [31:0] io_to_sdq_bits_uop_debug_inst_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_is_rvc_0; // @[fp-pipeline.scala:28:7]
wire [39:0] io_to_sdq_bits_uop_debug_pc_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_to_sdq_bits_uop_iq_type_0; // @[fp-pipeline.scala:28:7]
wire [9:0] io_to_sdq_bits_uop_fu_code_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_sdq_bits_uop_iw_state_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_iw_p1_poisoned_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_iw_p2_poisoned_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_is_br_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_is_jalr_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_is_jal_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_is_sfb_0; // @[fp-pipeline.scala:28:7]
wire [15:0] io_to_sdq_bits_uop_br_mask_0; // @[fp-pipeline.scala:28:7]
wire [3:0] io_to_sdq_bits_uop_br_tag_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_to_sdq_bits_uop_ftq_idx_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_edge_inst_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_to_sdq_bits_uop_pc_lob_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_taken_0; // @[fp-pipeline.scala:28:7]
wire [19:0] io_to_sdq_bits_uop_imm_packed_0; // @[fp-pipeline.scala:28:7]
wire [11:0] io_to_sdq_bits_uop_csr_addr_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_to_sdq_bits_uop_rob_idx_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_to_sdq_bits_uop_ldq_idx_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_to_sdq_bits_uop_stq_idx_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_sdq_bits_uop_rxq_idx_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_to_sdq_bits_uop_pdst_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_to_sdq_bits_uop_prs1_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_to_sdq_bits_uop_prs2_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_to_sdq_bits_uop_prs3_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_to_sdq_bits_uop_ppred_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_prs1_busy_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_prs2_busy_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_prs3_busy_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_ppred_busy_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_to_sdq_bits_uop_stale_pdst_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_exception_0; // @[fp-pipeline.scala:28:7]
wire [63:0] io_to_sdq_bits_uop_exc_cause_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_bypassable_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_to_sdq_bits_uop_mem_cmd_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_sdq_bits_uop_mem_size_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_mem_signed_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_is_fence_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_is_fencei_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_is_amo_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_uses_ldq_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_uses_stq_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_is_sys_pc2epc_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_is_unique_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_flush_on_commit_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_ldst_is_rs1_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_to_sdq_bits_uop_ldst_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_to_sdq_bits_uop_lrs1_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_to_sdq_bits_uop_lrs2_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_to_sdq_bits_uop_lrs3_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_ldst_val_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_sdq_bits_uop_dst_rtype_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_sdq_bits_uop_lrs1_rtype_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_sdq_bits_uop_lrs2_rtype_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_frs3_en_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_fp_val_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_fp_single_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_xcpt_pf_if_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_xcpt_ae_if_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_xcpt_ma_if_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_bp_debug_if_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_uop_bp_xcpt_if_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_sdq_bits_uop_debug_fsrc_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_sdq_bits_uop_debug_tsrc_0; // @[fp-pipeline.scala:28:7]
wire [3:0] io_to_sdq_bits_fflags_bits_uop_ctrl_br_type_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_sdq_bits_fflags_bits_uop_ctrl_op1_sel_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_to_sdq_bits_fflags_bits_uop_ctrl_op2_sel_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_to_sdq_bits_fflags_bits_uop_ctrl_imm_sel_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_to_sdq_bits_fflags_bits_uop_ctrl_op_fcn_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_ctrl_fcn_dw_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_to_sdq_bits_fflags_bits_uop_ctrl_csr_cmd_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_ctrl_is_load_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_ctrl_is_sta_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_ctrl_is_std_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_to_sdq_bits_fflags_bits_uop_uopc_0; // @[fp-pipeline.scala:28:7]
wire [31:0] io_to_sdq_bits_fflags_bits_uop_inst_0; // @[fp-pipeline.scala:28:7]
wire [31:0] io_to_sdq_bits_fflags_bits_uop_debug_inst_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_is_rvc_0; // @[fp-pipeline.scala:28:7]
wire [39:0] io_to_sdq_bits_fflags_bits_uop_debug_pc_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_to_sdq_bits_fflags_bits_uop_iq_type_0; // @[fp-pipeline.scala:28:7]
wire [9:0] io_to_sdq_bits_fflags_bits_uop_fu_code_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_sdq_bits_fflags_bits_uop_iw_state_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_iw_p1_poisoned_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_iw_p2_poisoned_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_is_br_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_is_jalr_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_is_jal_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_is_sfb_0; // @[fp-pipeline.scala:28:7]
wire [15:0] io_to_sdq_bits_fflags_bits_uop_br_mask_0; // @[fp-pipeline.scala:28:7]
wire [3:0] io_to_sdq_bits_fflags_bits_uop_br_tag_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_to_sdq_bits_fflags_bits_uop_ftq_idx_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_edge_inst_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_to_sdq_bits_fflags_bits_uop_pc_lob_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_taken_0; // @[fp-pipeline.scala:28:7]
wire [19:0] io_to_sdq_bits_fflags_bits_uop_imm_packed_0; // @[fp-pipeline.scala:28:7]
wire [11:0] io_to_sdq_bits_fflags_bits_uop_csr_addr_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_to_sdq_bits_fflags_bits_uop_rob_idx_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_to_sdq_bits_fflags_bits_uop_ldq_idx_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_to_sdq_bits_fflags_bits_uop_stq_idx_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_sdq_bits_fflags_bits_uop_rxq_idx_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_to_sdq_bits_fflags_bits_uop_pdst_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_to_sdq_bits_fflags_bits_uop_prs1_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_to_sdq_bits_fflags_bits_uop_prs2_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_to_sdq_bits_fflags_bits_uop_prs3_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_to_sdq_bits_fflags_bits_uop_ppred_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_prs1_busy_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_prs2_busy_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_prs3_busy_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_ppred_busy_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_to_sdq_bits_fflags_bits_uop_stale_pdst_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_exception_0; // @[fp-pipeline.scala:28:7]
wire [63:0] io_to_sdq_bits_fflags_bits_uop_exc_cause_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_bypassable_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_to_sdq_bits_fflags_bits_uop_mem_cmd_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_sdq_bits_fflags_bits_uop_mem_size_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_mem_signed_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_is_fence_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_is_fencei_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_is_amo_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_uses_ldq_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_uses_stq_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_is_sys_pc2epc_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_is_unique_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_flush_on_commit_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_ldst_is_rs1_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_to_sdq_bits_fflags_bits_uop_ldst_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_to_sdq_bits_fflags_bits_uop_lrs1_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_to_sdq_bits_fflags_bits_uop_lrs2_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_to_sdq_bits_fflags_bits_uop_lrs3_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_ldst_val_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_sdq_bits_fflags_bits_uop_dst_rtype_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_sdq_bits_fflags_bits_uop_lrs1_rtype_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_sdq_bits_fflags_bits_uop_lrs2_rtype_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_frs3_en_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_fp_val_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_fp_single_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_xcpt_pf_if_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_xcpt_ae_if_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_xcpt_ma_if_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_bp_debug_if_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_bits_uop_bp_xcpt_if_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_sdq_bits_fflags_bits_uop_debug_fsrc_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_sdq_bits_fflags_bits_uop_debug_tsrc_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_to_sdq_bits_fflags_bits_flags_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_fflags_valid_0; // @[fp-pipeline.scala:28:7]
wire [63:0] io_to_sdq_bits_data_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_bits_predicated_0; // @[fp-pipeline.scala:28:7]
wire io_to_sdq_valid_0; // @[fp-pipeline.scala:28:7]
wire [3:0] io_to_int_bits_uop_ctrl_br_type_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_int_bits_uop_ctrl_op1_sel_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_to_int_bits_uop_ctrl_op2_sel_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_to_int_bits_uop_ctrl_imm_sel_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_to_int_bits_uop_ctrl_op_fcn_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_ctrl_fcn_dw_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_to_int_bits_uop_ctrl_csr_cmd_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_ctrl_is_load_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_ctrl_is_sta_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_ctrl_is_std_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_to_int_bits_uop_uopc_0; // @[fp-pipeline.scala:28:7]
wire [31:0] io_to_int_bits_uop_inst_0; // @[fp-pipeline.scala:28:7]
wire [31:0] io_to_int_bits_uop_debug_inst_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_is_rvc_0; // @[fp-pipeline.scala:28:7]
wire [39:0] io_to_int_bits_uop_debug_pc_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_to_int_bits_uop_iq_type_0; // @[fp-pipeline.scala:28:7]
wire [9:0] io_to_int_bits_uop_fu_code_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_int_bits_uop_iw_state_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_iw_p1_poisoned_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_iw_p2_poisoned_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_is_br_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_is_jalr_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_is_jal_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_is_sfb_0; // @[fp-pipeline.scala:28:7]
wire [15:0] io_to_int_bits_uop_br_mask_0; // @[fp-pipeline.scala:28:7]
wire [3:0] io_to_int_bits_uop_br_tag_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_to_int_bits_uop_ftq_idx_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_edge_inst_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_to_int_bits_uop_pc_lob_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_taken_0; // @[fp-pipeline.scala:28:7]
wire [19:0] io_to_int_bits_uop_imm_packed_0; // @[fp-pipeline.scala:28:7]
wire [11:0] io_to_int_bits_uop_csr_addr_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_to_int_bits_uop_rob_idx_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_to_int_bits_uop_ldq_idx_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_to_int_bits_uop_stq_idx_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_int_bits_uop_rxq_idx_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_to_int_bits_uop_pdst_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_to_int_bits_uop_prs1_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_to_int_bits_uop_prs2_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_to_int_bits_uop_prs3_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_to_int_bits_uop_ppred_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_prs1_busy_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_prs2_busy_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_prs3_busy_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_ppred_busy_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_to_int_bits_uop_stale_pdst_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_exception_0; // @[fp-pipeline.scala:28:7]
wire [63:0] io_to_int_bits_uop_exc_cause_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_bypassable_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_to_int_bits_uop_mem_cmd_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_int_bits_uop_mem_size_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_mem_signed_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_is_fence_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_is_fencei_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_is_amo_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_uses_ldq_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_uses_stq_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_is_sys_pc2epc_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_is_unique_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_flush_on_commit_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_ldst_is_rs1_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_to_int_bits_uop_ldst_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_to_int_bits_uop_lrs1_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_to_int_bits_uop_lrs2_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_to_int_bits_uop_lrs3_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_ldst_val_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_int_bits_uop_dst_rtype_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_int_bits_uop_lrs1_rtype_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_int_bits_uop_lrs2_rtype_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_frs3_en_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_fp_val_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_fp_single_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_xcpt_pf_if_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_xcpt_ae_if_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_xcpt_ma_if_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_bp_debug_if_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_uop_bp_xcpt_if_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_int_bits_uop_debug_fsrc_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_int_bits_uop_debug_tsrc_0; // @[fp-pipeline.scala:28:7]
wire [3:0] io_to_int_bits_fflags_bits_uop_ctrl_br_type_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_int_bits_fflags_bits_uop_ctrl_op1_sel_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_to_int_bits_fflags_bits_uop_ctrl_op2_sel_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_to_int_bits_fflags_bits_uop_ctrl_imm_sel_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_to_int_bits_fflags_bits_uop_ctrl_op_fcn_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_ctrl_fcn_dw_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_to_int_bits_fflags_bits_uop_ctrl_csr_cmd_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_ctrl_is_load_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_ctrl_is_sta_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_ctrl_is_std_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_to_int_bits_fflags_bits_uop_uopc_0; // @[fp-pipeline.scala:28:7]
wire [31:0] io_to_int_bits_fflags_bits_uop_inst_0; // @[fp-pipeline.scala:28:7]
wire [31:0] io_to_int_bits_fflags_bits_uop_debug_inst_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_is_rvc_0; // @[fp-pipeline.scala:28:7]
wire [39:0] io_to_int_bits_fflags_bits_uop_debug_pc_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_to_int_bits_fflags_bits_uop_iq_type_0; // @[fp-pipeline.scala:28:7]
wire [9:0] io_to_int_bits_fflags_bits_uop_fu_code_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_int_bits_fflags_bits_uop_iw_state_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_iw_p1_poisoned_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_iw_p2_poisoned_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_is_br_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_is_jalr_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_is_jal_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_is_sfb_0; // @[fp-pipeline.scala:28:7]
wire [15:0] io_to_int_bits_fflags_bits_uop_br_mask_0; // @[fp-pipeline.scala:28:7]
wire [3:0] io_to_int_bits_fflags_bits_uop_br_tag_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_to_int_bits_fflags_bits_uop_ftq_idx_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_edge_inst_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_to_int_bits_fflags_bits_uop_pc_lob_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_taken_0; // @[fp-pipeline.scala:28:7]
wire [19:0] io_to_int_bits_fflags_bits_uop_imm_packed_0; // @[fp-pipeline.scala:28:7]
wire [11:0] io_to_int_bits_fflags_bits_uop_csr_addr_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_to_int_bits_fflags_bits_uop_rob_idx_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_to_int_bits_fflags_bits_uop_ldq_idx_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_to_int_bits_fflags_bits_uop_stq_idx_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_int_bits_fflags_bits_uop_rxq_idx_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_to_int_bits_fflags_bits_uop_pdst_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_to_int_bits_fflags_bits_uop_prs1_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_to_int_bits_fflags_bits_uop_prs2_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_to_int_bits_fflags_bits_uop_prs3_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_to_int_bits_fflags_bits_uop_ppred_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_prs1_busy_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_prs2_busy_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_prs3_busy_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_ppred_busy_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_to_int_bits_fflags_bits_uop_stale_pdst_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_exception_0; // @[fp-pipeline.scala:28:7]
wire [63:0] io_to_int_bits_fflags_bits_uop_exc_cause_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_bypassable_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_to_int_bits_fflags_bits_uop_mem_cmd_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_int_bits_fflags_bits_uop_mem_size_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_mem_signed_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_is_fence_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_is_fencei_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_is_amo_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_uses_ldq_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_uses_stq_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_is_sys_pc2epc_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_is_unique_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_flush_on_commit_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_ldst_is_rs1_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_to_int_bits_fflags_bits_uop_ldst_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_to_int_bits_fflags_bits_uop_lrs1_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_to_int_bits_fflags_bits_uop_lrs2_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_to_int_bits_fflags_bits_uop_lrs3_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_ldst_val_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_int_bits_fflags_bits_uop_dst_rtype_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_int_bits_fflags_bits_uop_lrs1_rtype_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_int_bits_fflags_bits_uop_lrs2_rtype_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_frs3_en_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_fp_val_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_fp_single_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_xcpt_pf_if_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_xcpt_ae_if_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_xcpt_ma_if_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_bp_debug_if_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_bits_uop_bp_xcpt_if_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_int_bits_fflags_bits_uop_debug_fsrc_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_to_int_bits_fflags_bits_uop_debug_tsrc_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_to_int_bits_fflags_bits_flags_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_fflags_valid_0; // @[fp-pipeline.scala:28:7]
wire [63:0] io_to_int_bits_data_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_bits_predicated_0; // @[fp-pipeline.scala:28:7]
wire io_to_int_valid_0; // @[fp-pipeline.scala:28:7]
wire [3:0] io_wakeups_0_bits_uop_ctrl_br_type_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_0_bits_uop_ctrl_op1_sel_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_wakeups_0_bits_uop_ctrl_op2_sel_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_wakeups_0_bits_uop_ctrl_imm_sel_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_wakeups_0_bits_uop_ctrl_op_fcn_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_ctrl_fcn_dw_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_wakeups_0_bits_uop_ctrl_csr_cmd_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_ctrl_is_load_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_ctrl_is_sta_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_ctrl_is_std_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_wakeups_0_bits_uop_uopc_0; // @[fp-pipeline.scala:28:7]
wire [31:0] io_wakeups_0_bits_uop_inst_0; // @[fp-pipeline.scala:28:7]
wire [31:0] io_wakeups_0_bits_uop_debug_inst_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_is_rvc_0; // @[fp-pipeline.scala:28:7]
wire [39:0] io_wakeups_0_bits_uop_debug_pc_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_wakeups_0_bits_uop_iq_type_0; // @[fp-pipeline.scala:28:7]
wire [9:0] io_wakeups_0_bits_uop_fu_code_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_0_bits_uop_iw_state_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_iw_p1_poisoned_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_iw_p2_poisoned_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_is_br_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_is_jalr_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_is_jal_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_is_sfb_0; // @[fp-pipeline.scala:28:7]
wire [15:0] io_wakeups_0_bits_uop_br_mask_0; // @[fp-pipeline.scala:28:7]
wire [3:0] io_wakeups_0_bits_uop_br_tag_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_wakeups_0_bits_uop_ftq_idx_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_edge_inst_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_wakeups_0_bits_uop_pc_lob_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_taken_0; // @[fp-pipeline.scala:28:7]
wire [19:0] io_wakeups_0_bits_uop_imm_packed_0; // @[fp-pipeline.scala:28:7]
wire [11:0] io_wakeups_0_bits_uop_csr_addr_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_wakeups_0_bits_uop_rob_idx_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_wakeups_0_bits_uop_ldq_idx_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_wakeups_0_bits_uop_stq_idx_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_0_bits_uop_rxq_idx_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_wakeups_0_bits_uop_pdst_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_wakeups_0_bits_uop_prs1_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_wakeups_0_bits_uop_prs2_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_wakeups_0_bits_uop_prs3_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_wakeups_0_bits_uop_ppred_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_prs1_busy_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_prs2_busy_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_prs3_busy_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_ppred_busy_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_wakeups_0_bits_uop_stale_pdst_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_exception_0; // @[fp-pipeline.scala:28:7]
wire [63:0] io_wakeups_0_bits_uop_exc_cause_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_bypassable_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_wakeups_0_bits_uop_mem_cmd_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_0_bits_uop_mem_size_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_mem_signed_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_is_fence_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_is_fencei_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_is_amo_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_uses_ldq_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_uses_stq_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_is_sys_pc2epc_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_is_unique_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_flush_on_commit_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_ldst_is_rs1_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_wakeups_0_bits_uop_ldst_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_wakeups_0_bits_uop_lrs1_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_wakeups_0_bits_uop_lrs2_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_wakeups_0_bits_uop_lrs3_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_ldst_val_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_0_bits_uop_dst_rtype_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_0_bits_uop_lrs1_rtype_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_0_bits_uop_lrs2_rtype_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_frs3_en_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_fp_val_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_fp_single_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_xcpt_pf_if_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_xcpt_ae_if_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_xcpt_ma_if_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_bp_debug_if_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_uop_bp_xcpt_if_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_0_bits_uop_debug_fsrc_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_0_bits_uop_debug_tsrc_0; // @[fp-pipeline.scala:28:7]
wire [3:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_br_type_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_op1_sel_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_op2_sel_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_imm_sel_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_op_fcn_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_ctrl_fcn_dw_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_csr_cmd_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_ctrl_is_load_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_ctrl_is_sta_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_ctrl_is_std_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_wakeups_0_bits_fflags_bits_uop_uopc_0; // @[fp-pipeline.scala:28:7]
wire [31:0] io_wakeups_0_bits_fflags_bits_uop_inst_0; // @[fp-pipeline.scala:28:7]
wire [31:0] io_wakeups_0_bits_fflags_bits_uop_debug_inst_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_is_rvc_0; // @[fp-pipeline.scala:28:7]
wire [39:0] io_wakeups_0_bits_fflags_bits_uop_debug_pc_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_wakeups_0_bits_fflags_bits_uop_iq_type_0; // @[fp-pipeline.scala:28:7]
wire [9:0] io_wakeups_0_bits_fflags_bits_uop_fu_code_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_0_bits_fflags_bits_uop_iw_state_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_iw_p1_poisoned_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_iw_p2_poisoned_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_is_br_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_is_jalr_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_is_jal_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_is_sfb_0; // @[fp-pipeline.scala:28:7]
wire [15:0] io_wakeups_0_bits_fflags_bits_uop_br_mask_0; // @[fp-pipeline.scala:28:7]
wire [3:0] io_wakeups_0_bits_fflags_bits_uop_br_tag_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_wakeups_0_bits_fflags_bits_uop_ftq_idx_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_edge_inst_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_wakeups_0_bits_fflags_bits_uop_pc_lob_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_taken_0; // @[fp-pipeline.scala:28:7]
wire [19:0] io_wakeups_0_bits_fflags_bits_uop_imm_packed_0; // @[fp-pipeline.scala:28:7]
wire [11:0] io_wakeups_0_bits_fflags_bits_uop_csr_addr_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_wakeups_0_bits_fflags_bits_uop_rob_idx_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_wakeups_0_bits_fflags_bits_uop_ldq_idx_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_wakeups_0_bits_fflags_bits_uop_stq_idx_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_0_bits_fflags_bits_uop_rxq_idx_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_wakeups_0_bits_fflags_bits_uop_pdst_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_wakeups_0_bits_fflags_bits_uop_prs1_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_wakeups_0_bits_fflags_bits_uop_prs2_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_wakeups_0_bits_fflags_bits_uop_prs3_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_wakeups_0_bits_fflags_bits_uop_ppred_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_prs1_busy_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_prs2_busy_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_prs3_busy_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_ppred_busy_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_wakeups_0_bits_fflags_bits_uop_stale_pdst_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_exception_0; // @[fp-pipeline.scala:28:7]
wire [63:0] io_wakeups_0_bits_fflags_bits_uop_exc_cause_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_bypassable_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_wakeups_0_bits_fflags_bits_uop_mem_cmd_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_0_bits_fflags_bits_uop_mem_size_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_mem_signed_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_is_fence_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_is_fencei_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_is_amo_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_uses_ldq_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_uses_stq_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_is_sys_pc2epc_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_is_unique_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_flush_on_commit_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_ldst_is_rs1_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_wakeups_0_bits_fflags_bits_uop_ldst_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_wakeups_0_bits_fflags_bits_uop_lrs1_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_wakeups_0_bits_fflags_bits_uop_lrs2_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_wakeups_0_bits_fflags_bits_uop_lrs3_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_ldst_val_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_0_bits_fflags_bits_uop_dst_rtype_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_0_bits_fflags_bits_uop_lrs1_rtype_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_0_bits_fflags_bits_uop_lrs2_rtype_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_frs3_en_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_fp_val_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_fp_single_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_xcpt_pf_if_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_xcpt_ae_if_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_xcpt_ma_if_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_bp_debug_if_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_bits_uop_bp_xcpt_if_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_0_bits_fflags_bits_uop_debug_fsrc_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_0_bits_fflags_bits_uop_debug_tsrc_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_wakeups_0_bits_fflags_bits_flags_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_fflags_valid_0; // @[fp-pipeline.scala:28:7]
wire [64:0] io_wakeups_0_bits_data_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_bits_predicated_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_0_valid_0; // @[fp-pipeline.scala:28:7]
wire [3:0] io_wakeups_1_bits_uop_ctrl_br_type_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_1_bits_uop_ctrl_op1_sel_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_wakeups_1_bits_uop_ctrl_op2_sel_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_wakeups_1_bits_uop_ctrl_imm_sel_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_wakeups_1_bits_uop_ctrl_op_fcn_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_ctrl_fcn_dw_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_wakeups_1_bits_uop_ctrl_csr_cmd_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_ctrl_is_load_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_ctrl_is_sta_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_ctrl_is_std_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_wakeups_1_bits_uop_uopc_0; // @[fp-pipeline.scala:28:7]
wire [31:0] io_wakeups_1_bits_uop_inst_0; // @[fp-pipeline.scala:28:7]
wire [31:0] io_wakeups_1_bits_uop_debug_inst_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_is_rvc_0; // @[fp-pipeline.scala:28:7]
wire [39:0] io_wakeups_1_bits_uop_debug_pc_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_wakeups_1_bits_uop_iq_type_0; // @[fp-pipeline.scala:28:7]
wire [9:0] io_wakeups_1_bits_uop_fu_code_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_1_bits_uop_iw_state_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_iw_p1_poisoned_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_iw_p2_poisoned_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_is_br_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_is_jalr_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_is_jal_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_is_sfb_0; // @[fp-pipeline.scala:28:7]
wire [15:0] io_wakeups_1_bits_uop_br_mask_0; // @[fp-pipeline.scala:28:7]
wire [3:0] io_wakeups_1_bits_uop_br_tag_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_wakeups_1_bits_uop_ftq_idx_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_edge_inst_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_wakeups_1_bits_uop_pc_lob_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_taken_0; // @[fp-pipeline.scala:28:7]
wire [19:0] io_wakeups_1_bits_uop_imm_packed_0; // @[fp-pipeline.scala:28:7]
wire [11:0] io_wakeups_1_bits_uop_csr_addr_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_wakeups_1_bits_uop_rob_idx_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_wakeups_1_bits_uop_ldq_idx_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_wakeups_1_bits_uop_stq_idx_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_1_bits_uop_rxq_idx_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_wakeups_1_bits_uop_pdst_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_wakeups_1_bits_uop_prs1_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_wakeups_1_bits_uop_prs2_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_wakeups_1_bits_uop_prs3_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_wakeups_1_bits_uop_ppred_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_prs1_busy_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_prs2_busy_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_prs3_busy_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_ppred_busy_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_wakeups_1_bits_uop_stale_pdst_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_exception_0; // @[fp-pipeline.scala:28:7]
wire [63:0] io_wakeups_1_bits_uop_exc_cause_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_bypassable_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_wakeups_1_bits_uop_mem_cmd_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_1_bits_uop_mem_size_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_mem_signed_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_is_fence_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_is_fencei_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_is_amo_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_uses_ldq_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_uses_stq_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_is_sys_pc2epc_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_is_unique_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_flush_on_commit_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_ldst_is_rs1_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_wakeups_1_bits_uop_ldst_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_wakeups_1_bits_uop_lrs1_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_wakeups_1_bits_uop_lrs2_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_wakeups_1_bits_uop_lrs3_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_ldst_val_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_1_bits_uop_dst_rtype_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_1_bits_uop_lrs1_rtype_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_1_bits_uop_lrs2_rtype_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_frs3_en_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_fp_val_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_fp_single_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_xcpt_pf_if_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_xcpt_ae_if_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_xcpt_ma_if_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_bp_debug_if_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_uop_bp_xcpt_if_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_1_bits_uop_debug_fsrc_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_1_bits_uop_debug_tsrc_0; // @[fp-pipeline.scala:28:7]
wire [3:0] io_wakeups_1_bits_fflags_bits_uop_ctrl_br_type_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_1_bits_fflags_bits_uop_ctrl_op1_sel_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_wakeups_1_bits_fflags_bits_uop_ctrl_op2_sel_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_wakeups_1_bits_fflags_bits_uop_ctrl_imm_sel_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_wakeups_1_bits_fflags_bits_uop_ctrl_op_fcn_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_ctrl_fcn_dw_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_wakeups_1_bits_fflags_bits_uop_ctrl_csr_cmd_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_ctrl_is_load_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_ctrl_is_sta_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_ctrl_is_std_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_wakeups_1_bits_fflags_bits_uop_uopc_0; // @[fp-pipeline.scala:28:7]
wire [31:0] io_wakeups_1_bits_fflags_bits_uop_inst_0; // @[fp-pipeline.scala:28:7]
wire [31:0] io_wakeups_1_bits_fflags_bits_uop_debug_inst_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_is_rvc_0; // @[fp-pipeline.scala:28:7]
wire [39:0] io_wakeups_1_bits_fflags_bits_uop_debug_pc_0; // @[fp-pipeline.scala:28:7]
wire [2:0] io_wakeups_1_bits_fflags_bits_uop_iq_type_0; // @[fp-pipeline.scala:28:7]
wire [9:0] io_wakeups_1_bits_fflags_bits_uop_fu_code_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_1_bits_fflags_bits_uop_iw_state_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_iw_p1_poisoned_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_iw_p2_poisoned_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_is_br_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_is_jalr_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_is_jal_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_is_sfb_0; // @[fp-pipeline.scala:28:7]
wire [15:0] io_wakeups_1_bits_fflags_bits_uop_br_mask_0; // @[fp-pipeline.scala:28:7]
wire [3:0] io_wakeups_1_bits_fflags_bits_uop_br_tag_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_wakeups_1_bits_fflags_bits_uop_ftq_idx_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_edge_inst_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_wakeups_1_bits_fflags_bits_uop_pc_lob_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_taken_0; // @[fp-pipeline.scala:28:7]
wire [19:0] io_wakeups_1_bits_fflags_bits_uop_imm_packed_0; // @[fp-pipeline.scala:28:7]
wire [11:0] io_wakeups_1_bits_fflags_bits_uop_csr_addr_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_wakeups_1_bits_fflags_bits_uop_rob_idx_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_wakeups_1_bits_fflags_bits_uop_ldq_idx_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_wakeups_1_bits_fflags_bits_uop_stq_idx_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_1_bits_fflags_bits_uop_rxq_idx_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_wakeups_1_bits_fflags_bits_uop_pdst_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_wakeups_1_bits_fflags_bits_uop_prs1_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_wakeups_1_bits_fflags_bits_uop_prs2_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_wakeups_1_bits_fflags_bits_uop_prs3_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_wakeups_1_bits_fflags_bits_uop_ppred_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_prs1_busy_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_prs2_busy_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_prs3_busy_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_ppred_busy_0; // @[fp-pipeline.scala:28:7]
wire [6:0] io_wakeups_1_bits_fflags_bits_uop_stale_pdst_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_exception_0; // @[fp-pipeline.scala:28:7]
wire [63:0] io_wakeups_1_bits_fflags_bits_uop_exc_cause_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_bypassable_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_wakeups_1_bits_fflags_bits_uop_mem_cmd_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_1_bits_fflags_bits_uop_mem_size_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_mem_signed_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_is_fence_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_is_fencei_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_is_amo_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_uses_ldq_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_uses_stq_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_is_sys_pc2epc_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_is_unique_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_flush_on_commit_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_ldst_is_rs1_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_wakeups_1_bits_fflags_bits_uop_ldst_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_wakeups_1_bits_fflags_bits_uop_lrs1_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_wakeups_1_bits_fflags_bits_uop_lrs2_0; // @[fp-pipeline.scala:28:7]
wire [5:0] io_wakeups_1_bits_fflags_bits_uop_lrs3_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_ldst_val_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_1_bits_fflags_bits_uop_dst_rtype_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_1_bits_fflags_bits_uop_lrs1_rtype_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_1_bits_fflags_bits_uop_lrs2_rtype_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_frs3_en_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_fp_val_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_fp_single_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_xcpt_pf_if_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_xcpt_ae_if_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_xcpt_ma_if_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_bp_debug_if_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_bits_uop_bp_xcpt_if_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_1_bits_fflags_bits_uop_debug_fsrc_0; // @[fp-pipeline.scala:28:7]
wire [1:0] io_wakeups_1_bits_fflags_bits_uop_debug_tsrc_0; // @[fp-pipeline.scala:28:7]
wire [4:0] io_wakeups_1_bits_fflags_bits_flags_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_bits_fflags_valid_0; // @[fp-pipeline.scala:28:7]
wire [64:0] io_wakeups_1_bits_data_0; // @[fp-pipeline.scala:28:7]
wire io_wakeups_1_valid_0; // @[fp-pipeline.scala:28:7]
wire [64:0] io_debug_wb_wdata_0_0; // @[fp-pipeline.scala:28:7]
wire [64:0] io_debug_wb_wdata_1_0; // @[fp-pipeline.scala:28:7]
wire iss_valids_0; // @[fp-pipeline.scala:88:24]
wire [3:0] iss_uops_0_ctrl_br_type; // @[fp-pipeline.scala:89:24]
wire [1:0] iss_uops_0_ctrl_op1_sel; // @[fp-pipeline.scala:89:24]
wire [2:0] iss_uops_0_ctrl_op2_sel; // @[fp-pipeline.scala:89:24]
wire [2:0] iss_uops_0_ctrl_imm_sel; // @[fp-pipeline.scala:89:24]
wire [4:0] iss_uops_0_ctrl_op_fcn; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_ctrl_fcn_dw; // @[fp-pipeline.scala:89:24]
wire [2:0] iss_uops_0_ctrl_csr_cmd; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_ctrl_is_load; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_ctrl_is_sta; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_ctrl_is_std; // @[fp-pipeline.scala:89:24]
wire [6:0] iss_uops_0_uopc; // @[fp-pipeline.scala:89:24]
wire [31:0] iss_uops_0_inst; // @[fp-pipeline.scala:89:24]
wire [31:0] iss_uops_0_debug_inst; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_is_rvc; // @[fp-pipeline.scala:89:24]
wire [39:0] iss_uops_0_debug_pc; // @[fp-pipeline.scala:89:24]
wire [2:0] iss_uops_0_iq_type; // @[fp-pipeline.scala:89:24]
wire [9:0] iss_uops_0_fu_code; // @[fp-pipeline.scala:89:24]
wire [1:0] iss_uops_0_iw_state; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_is_br; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_is_jalr; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_is_jal; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_is_sfb; // @[fp-pipeline.scala:89:24]
wire [15:0] iss_uops_0_br_mask; // @[fp-pipeline.scala:89:24]
wire [3:0] iss_uops_0_br_tag; // @[fp-pipeline.scala:89:24]
wire [4:0] iss_uops_0_ftq_idx; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_edge_inst; // @[fp-pipeline.scala:89:24]
wire [5:0] iss_uops_0_pc_lob; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_taken; // @[fp-pipeline.scala:89:24]
wire [19:0] iss_uops_0_imm_packed; // @[fp-pipeline.scala:89:24]
wire [11:0] iss_uops_0_csr_addr; // @[fp-pipeline.scala:89:24]
wire [6:0] iss_uops_0_rob_idx; // @[fp-pipeline.scala:89:24]
wire [4:0] iss_uops_0_ldq_idx; // @[fp-pipeline.scala:89:24]
wire [4:0] iss_uops_0_stq_idx; // @[fp-pipeline.scala:89:24]
wire [1:0] iss_uops_0_rxq_idx; // @[fp-pipeline.scala:89:24]
wire [6:0] iss_uops_0_pdst; // @[fp-pipeline.scala:89:24]
wire [6:0] iss_uops_0_prs1; // @[fp-pipeline.scala:89:24]
wire [6:0] iss_uops_0_prs2; // @[fp-pipeline.scala:89:24]
wire [6:0] iss_uops_0_prs3; // @[fp-pipeline.scala:89:24]
wire [4:0] iss_uops_0_ppred; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_prs1_busy; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_prs2_busy; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_prs3_busy; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_ppred_busy; // @[fp-pipeline.scala:89:24]
wire [6:0] iss_uops_0_stale_pdst; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_exception; // @[fp-pipeline.scala:89:24]
wire [63:0] iss_uops_0_exc_cause; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_bypassable; // @[fp-pipeline.scala:89:24]
wire [4:0] iss_uops_0_mem_cmd; // @[fp-pipeline.scala:89:24]
wire [1:0] iss_uops_0_mem_size; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_mem_signed; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_is_fence; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_is_fencei; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_is_amo; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_uses_ldq; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_uses_stq; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_is_sys_pc2epc; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_is_unique; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_flush_on_commit; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_ldst_is_rs1; // @[fp-pipeline.scala:89:24]
wire [5:0] iss_uops_0_ldst; // @[fp-pipeline.scala:89:24]
wire [5:0] iss_uops_0_lrs1; // @[fp-pipeline.scala:89:24]
wire [5:0] iss_uops_0_lrs2; // @[fp-pipeline.scala:89:24]
wire [5:0] iss_uops_0_lrs3; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_ldst_val; // @[fp-pipeline.scala:89:24]
wire [1:0] iss_uops_0_dst_rtype; // @[fp-pipeline.scala:89:24]
wire [1:0] iss_uops_0_lrs1_rtype; // @[fp-pipeline.scala:89:24]
wire [1:0] iss_uops_0_lrs2_rtype; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_frs3_en; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_fp_val; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_fp_single; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_xcpt_pf_if; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_xcpt_ae_if; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_xcpt_ma_if; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_bp_debug_if; // @[fp-pipeline.scala:89:24]
wire iss_uops_0_bp_xcpt_if; // @[fp-pipeline.scala:89:24]
wire [1:0] iss_uops_0_debug_fsrc; // @[fp-pipeline.scala:89:24]
wire [1:0] iss_uops_0_debug_tsrc; // @[fp-pipeline.scala:89:24]
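// fdiv_issued: the uop issued from FP issue slot 0 this cycle targets the FP divide/sqrt
// functional unit (fu_code masked with 10'h80).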
wire [9:0] _fdiv_issued_T = iss_uops_0_fu_code & 10'h80; // @[fp-pipeline.scala:89:24]
wire _fdiv_issued_T_1 = |_fdiv_issued_T; // @[micro-op.scala:154:{40,47}]
wire fdiv_issued = iss_valids_0 & _fdiv_issued_T_1; // @[fp-pipeline.scala:88:24, :123:39]
reg [9:0] REG; // @[fp-pipeline.scala:124:36]
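// Low-latency (memory) writeback port 0: NaN-box single-precision load data by forcing the
// upper 32 bits to all-ones when mem_size indicates a 4-byte load, then recode the IEEE value
// into the 65-bit internal HardFloat format in the logic that follows.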
wire _ll_wbarb_io_in_0_bits_data_T = io_ll_wports_0_bits_uop_mem_size_0 != 2'h2; // @[fp-pipeline.scala:28:7, :176:75]
wire _ll_wbarb_io_in_0_bits_data_T_1 = _ll_wbarb_io_in_0_bits_data_T; // @[package.scala:39:86]
wire [63:0] _ll_wbarb_io_in_0_bits_data_T_2 = _ll_wbarb_io_in_0_bits_data_T_1 ? 64'h0 : 64'hFFFFFFFF00000000; // @[package.scala:39:{76,86}]
wire [64:0] _ll_wbarb_io_in_0_bits_data_T_3 = {1'h0, _ll_wbarb_io_in_0_bits_data_T_2} | io_ll_wports_0_bits_data_0; // @[package.scala:39:76]
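// rawFloatFromFN: decompose the 64-bit (double-precision) view of the boxed data into
// sign / exponent / fraction, detecting zero, subnormal, and special-exponent cases.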
wire ll_wbarb_io_in_0_bits_data_rawIn_sign = _ll_wbarb_io_in_0_bits_data_T_3[63]; // @[FPU.scala:431:23]
wire ll_wbarb_io_in_0_bits_data_rawIn_sign_0 = ll_wbarb_io_in_0_bits_data_rawIn_sign; // @[rawFloatFromFN.scala:44:18, :63:19]
wire [10:0] ll_wbarb_io_in_0_bits_data_rawIn_expIn = _ll_wbarb_io_in_0_bits_data_T_3[62:52]; // @[FPU.scala:431:23]
wire [51:0] ll_wbarb_io_in_0_bits_data_rawIn_fractIn = _ll_wbarb_io_in_0_bits_data_T_3[51:0]; // @[FPU.scala:431:23]
wire ll_wbarb_io_in_0_bits_data_rawIn_isZeroExpIn = ll_wbarb_io_in_0_bits_data_rawIn_expIn == 11'h0; // @[rawFloatFromFN.scala:45:19, :48:30]
wire ll_wbarb_io_in_0_bits_data_rawIn_isZeroFractIn = ll_wbarb_io_in_0_bits_data_rawIn_fractIn == 52'h0; // @[rawFloatFromFN.scala:46:21, :49:34]
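// Leading-zero count (normDist) of the 52-bit fraction, built as a priority-mux chain;
// used to left-normalize subnormal inputs.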
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[0]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_1 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[1]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_2 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[2]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_3 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[3]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_4 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[4]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_5 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[5]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_6 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[6]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_7 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[7]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_8 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[8]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_9 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[9]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_10 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[10]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_11 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[11]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_12 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[12]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_13 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[13]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_14 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[14]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_15 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[15]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_16 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[16]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_17 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[17]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_18 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[18]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_19 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[19]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_20 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[20]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_21 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[21]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_22 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[22]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_23 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[23]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_24 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[24]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_25 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[25]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_26 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[26]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_27 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[27]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_28 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[28]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_29 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[29]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_30 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[30]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_31 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[31]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_32 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[32]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_33 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[33]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_34 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[34]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_35 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[35]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_36 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[36]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_37 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[37]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_38 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[38]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_39 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[39]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_40 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[40]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_41 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[41]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_42 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[42]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_43 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[43]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_44 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[44]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_45 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[45]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_46 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[46]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_47 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[47]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_48 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[48]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_49 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[49]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_50 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[50]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_51 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn[51]; // @[rawFloatFromFN.scala:46:21]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_52 = {5'h19, ~_ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_1}; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_53 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_2 ? 6'h31 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_52; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_54 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_3 ? 6'h30 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_53; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_55 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_4 ? 6'h2F : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_54; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_56 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_5 ? 6'h2E : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_55; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_57 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_6 ? 6'h2D : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_56; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_58 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_7 ? 6'h2C : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_57; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_59 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_8 ? 6'h2B : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_58; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_60 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_9 ? 6'h2A : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_59; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_61 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_10 ? 6'h29 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_60; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_62 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_11 ? 6'h28 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_61; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_63 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_12 ? 6'h27 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_62; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_64 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_13 ? 6'h26 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_63; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_65 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_14 ? 6'h25 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_64; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_66 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_15 ? 6'h24 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_65; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_67 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_16 ? 6'h23 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_66; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_68 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_17 ? 6'h22 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_67; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_69 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_18 ? 6'h21 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_68; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_70 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_19 ? 6'h20 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_69; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_71 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_20 ? 6'h1F : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_70; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_72 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_21 ? 6'h1E : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_71; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_73 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_22 ? 6'h1D : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_72; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_74 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_23 ? 6'h1C : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_73; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_75 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_24 ? 6'h1B : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_74; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_76 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_25 ? 6'h1A : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_75; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_77 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_26 ? 6'h19 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_76; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_78 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_27 ? 6'h18 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_77; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_79 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_28 ? 6'h17 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_78; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_80 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_29 ? 6'h16 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_79; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_81 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_30 ? 6'h15 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_80; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_82 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_31 ? 6'h14 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_81; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_83 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_32 ? 6'h13 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_82; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_84 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_33 ? 6'h12 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_83; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_85 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_34 ? 6'h11 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_84; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_86 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_35 ? 6'h10 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_85; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_87 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_36 ? 6'hF : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_86; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_88 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_37 ? 6'hE : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_87; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_89 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_38 ? 6'hD : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_88; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_90 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_39 ? 6'hC : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_89; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_91 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_40 ? 6'hB : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_90; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_92 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_41 ? 6'hA : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_91; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_93 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_42 ? 6'h9 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_92; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_94 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_43 ? 6'h8 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_93; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_95 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_44 ? 6'h7 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_94; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_96 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_45 ? 6'h6 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_95; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_97 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_46 ? 6'h5 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_96; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_98 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_47 ? 6'h4 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_97; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_99 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_48 ? 6'h3 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_98; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_100 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_49 ? 6'h2 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_99; // @[Mux.scala:50:70]
wire [5:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_101 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_50 ? 6'h1 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_100; // @[Mux.scala:50:70]
wire [5:0] ll_wbarb_io_in_0_bits_data_rawIn_normDist = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_51 ? 6'h0 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_101; // @[Mux.scala:50:70]
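// Normalize the subnormal fraction: shift left by normDist, drop the now-implicit leading one,
// and pad with a zero LSB.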
wire [114:0] _ll_wbarb_io_in_0_bits_data_rawIn_subnormFract_T = {63'h0, ll_wbarb_io_in_0_bits_data_rawIn_fractIn} << ll_wbarb_io_in_0_bits_data_rawIn_normDist; // @[Mux.scala:50:70]
wire [50:0] _ll_wbarb_io_in_0_bits_data_rawIn_subnormFract_T_1 = _ll_wbarb_io_in_0_bits_data_rawIn_subnormFract_T[50:0]; // @[rawFloatFromFN.scala:52:{33,46}]
wire [51:0] ll_wbarb_io_in_0_bits_data_rawIn_subnormFract = {_ll_wbarb_io_in_0_bits_data_rawIn_subnormFract_T_1, 1'h0}; // @[rawFloatFromFN.scala:52:{46,64}]
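// Re-bias the exponent for the recoded format: normal inputs use expIn, subnormal inputs use
// the complement of normDist, with a constant offset of 12'h402 (subnormal) or 12'h401 (normal).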
wire [11:0] _ll_wbarb_io_in_0_bits_data_rawIn_adjustedExp_T = {6'h3F, ~ll_wbarb_io_in_0_bits_data_rawIn_normDist}; // @[Mux.scala:50:70]
wire [11:0] _ll_wbarb_io_in_0_bits_data_rawIn_adjustedExp_T_1 = ll_wbarb_io_in_0_bits_data_rawIn_isZeroExpIn ? _ll_wbarb_io_in_0_bits_data_rawIn_adjustedExp_T : {1'h0, ll_wbarb_io_in_0_bits_data_rawIn_expIn}; // @[rawFloatFromFN.scala:45:19, :48:30, :54:10, :55:18]
wire [1:0] _ll_wbarb_io_in_0_bits_data_rawIn_adjustedExp_T_2 = ll_wbarb_io_in_0_bits_data_rawIn_isZeroExpIn ? 2'h2 : 2'h1; // @[rawFloatFromFN.scala:48:30, :58:14]
wire [10:0] _ll_wbarb_io_in_0_bits_data_rawIn_adjustedExp_T_3 = {9'h100, _ll_wbarb_io_in_0_bits_data_rawIn_adjustedExp_T_2}; // @[rawFloatFromFN.scala:58:{9,14}]
wire [12:0] _ll_wbarb_io_in_0_bits_data_rawIn_adjustedExp_T_4 = {1'h0, _ll_wbarb_io_in_0_bits_data_rawIn_adjustedExp_T_1} + {2'h0, _ll_wbarb_io_in_0_bits_data_rawIn_adjustedExp_T_3}; // @[rawFloatFromFN.scala:54:10, :57:9, :58:9]
wire [11:0] ll_wbarb_io_in_0_bits_data_rawIn_adjustedExp = _ll_wbarb_io_in_0_bits_data_rawIn_adjustedExp_T_4[11:0]; // @[rawFloatFromFN.scala:57:9]
wire [11:0] _ll_wbarb_io_in_0_bits_data_rawIn_out_sExp_T = ll_wbarb_io_in_0_bits_data_rawIn_adjustedExp; // @[rawFloatFromFN.scala:57:9, :68:28]
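// Classify the input: zero when both exponent and fraction fields are zero; "special" when the
// top two bits of the adjusted exponent are set, which then splits into NaN (non-zero fraction)
// or infinity (zero fraction).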
wire ll_wbarb_io_in_0_bits_data_rawIn_isZero = ll_wbarb_io_in_0_bits_data_rawIn_isZeroExpIn & ll_wbarb_io_in_0_bits_data_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:48:30, :49:34, :60:30]
wire ll_wbarb_io_in_0_bits_data_rawIn_isZero_0 = ll_wbarb_io_in_0_bits_data_rawIn_isZero; // @[rawFloatFromFN.scala:60:30, :63:19]
wire [1:0] _ll_wbarb_io_in_0_bits_data_rawIn_isSpecial_T = ll_wbarb_io_in_0_bits_data_rawIn_adjustedExp[11:10]; // @[rawFloatFromFN.scala:57:9, :61:32]
wire ll_wbarb_io_in_0_bits_data_rawIn_isSpecial = &_ll_wbarb_io_in_0_bits_data_rawIn_isSpecial_T; // @[rawFloatFromFN.scala:61:{32,57}]
wire _ll_wbarb_io_in_0_bits_data_rawIn_out_isNaN_T_1; // @[rawFloatFromFN.scala:64:28]
wire _ll_wbarb_io_in_0_bits_data_rawIn_out_isInf_T; // @[rawFloatFromFN.scala:65:28]
wire ll_wbarb_io_in_0_bits_data_rawIn_isNaN; // @[rawFloatFromFN.scala:63:19]
wire _ll_wbarb_io_in_0_bits_data_T_6 = ll_wbarb_io_in_0_bits_data_rawIn_isNaN; // @[recFNFromFN.scala:49:20]
wire [12:0] _ll_wbarb_io_in_0_bits_data_rawIn_out_sExp_T_1; // @[rawFloatFromFN.scala:68:42]
wire [53:0] _ll_wbarb_io_in_0_bits_data_rawIn_out_sig_T_3; // @[rawFloatFromFN.scala:70:27]
wire ll_wbarb_io_in_0_bits_data_rawIn_isInf; // @[rawFloatFromFN.scala:63:19]
wire [12:0] ll_wbarb_io_in_0_bits_data_rawIn_sExp; // @[rawFloatFromFN.scala:63:19]
wire [53:0] ll_wbarb_io_in_0_bits_data_rawIn_sig; // @[rawFloatFromFN.scala:63:19]
wire _ll_wbarb_io_in_0_bits_data_rawIn_out_isNaN_T = ~ll_wbarb_io_in_0_bits_data_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:49:34, :64:31]
assign _ll_wbarb_io_in_0_bits_data_rawIn_out_isNaN_T_1 = ll_wbarb_io_in_0_bits_data_rawIn_isSpecial & _ll_wbarb_io_in_0_bits_data_rawIn_out_isNaN_T; // @[rawFloatFromFN.scala:61:57, :64:{28,31}]
assign ll_wbarb_io_in_0_bits_data_rawIn_isNaN = _ll_wbarb_io_in_0_bits_data_rawIn_out_isNaN_T_1; // @[rawFloatFromFN.scala:63:19, :64:28]
assign _ll_wbarb_io_in_0_bits_data_rawIn_out_isInf_T = ll_wbarb_io_in_0_bits_data_rawIn_isSpecial & ll_wbarb_io_in_0_bits_data_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:49:34, :61:57, :65:28]
assign ll_wbarb_io_in_0_bits_data_rawIn_isInf = _ll_wbarb_io_in_0_bits_data_rawIn_out_isInf_T; // @[rawFloatFromFN.scala:63:19, :65:28]
assign _ll_wbarb_io_in_0_bits_data_rawIn_out_sExp_T_1 = {1'h0, _ll_wbarb_io_in_0_bits_data_rawIn_out_sExp_T}; // @[rawFloatFromFN.scala:68:{28,42}]
assign ll_wbarb_io_in_0_bits_data_rawIn_sExp = _ll_wbarb_io_in_0_bits_data_rawIn_out_sExp_T_1; // @[rawFloatFromFN.scala:63:19, :68:42]
wire _ll_wbarb_io_in_0_bits_data_rawIn_out_sig_T = ~ll_wbarb_io_in_0_bits_data_rawIn_isZero; // @[rawFloatFromFN.scala:60:30, :70:19]
wire [1:0] _ll_wbarb_io_in_0_bits_data_rawIn_out_sig_T_1 = {1'h0, _ll_wbarb_io_in_0_bits_data_rawIn_out_sig_T}; // @[rawFloatFromFN.scala:70:{16,19}]
wire [51:0] _ll_wbarb_io_in_0_bits_data_rawIn_out_sig_T_2 = ll_wbarb_io_in_0_bits_data_rawIn_isZeroExpIn ? ll_wbarb_io_in_0_bits_data_rawIn_subnormFract : ll_wbarb_io_in_0_bits_data_rawIn_fractIn; // @[rawFloatFromFN.scala:46:21, :48:30, :52:64, :70:33]
assign _ll_wbarb_io_in_0_bits_data_rawIn_out_sig_T_3 = {_ll_wbarb_io_in_0_bits_data_rawIn_out_sig_T_1, _ll_wbarb_io_in_0_bits_data_rawIn_out_sig_T_2}; // @[rawFloatFromFN.scala:70:{16,27,33}]
assign ll_wbarb_io_in_0_bits_data_rawIn_sig = _ll_wbarb_io_in_0_bits_data_rawIn_out_sig_T_3; // @[rawFloatFromFN.scala:63:19, :70:27]
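// recFNFromFN: pack the raw float into the 65-bit recoded format
// {sign, 3-bit exponent code (zeroed for zero, NaN flag OR'd in), 9 low exponent bits, 52 fraction bits}.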
wire [2:0] _ll_wbarb_io_in_0_bits_data_T_4 = ll_wbarb_io_in_0_bits_data_rawIn_sExp[11:9]; // @[recFNFromFN.scala:48:50]
wire [2:0] _ll_wbarb_io_in_0_bits_data_T_5 = ll_wbarb_io_in_0_bits_data_rawIn_isZero_0 ? 3'h0 : _ll_wbarb_io_in_0_bits_data_T_4; // @[recFNFromFN.scala:48:{15,50}]
wire [2:0] _ll_wbarb_io_in_0_bits_data_T_7 = {_ll_wbarb_io_in_0_bits_data_T_5[2:1], _ll_wbarb_io_in_0_bits_data_T_5[0] | _ll_wbarb_io_in_0_bits_data_T_6}; // @[recFNFromFN.scala:48:{15,76}, :49:20]
wire [3:0] _ll_wbarb_io_in_0_bits_data_T_8 = {ll_wbarb_io_in_0_bits_data_rawIn_sign_0, _ll_wbarb_io_in_0_bits_data_T_7}; // @[recFNFromFN.scala:47:20, :48:76]
wire [8:0] _ll_wbarb_io_in_0_bits_data_T_9 = ll_wbarb_io_in_0_bits_data_rawIn_sExp[8:0]; // @[recFNFromFN.scala:50:23]
wire [12:0] _ll_wbarb_io_in_0_bits_data_T_10 = {_ll_wbarb_io_in_0_bits_data_T_8, _ll_wbarb_io_in_0_bits_data_T_9}; // @[recFNFromFN.scala:47:20, :49:45, :50:23]
wire [51:0] _ll_wbarb_io_in_0_bits_data_T_11 = ll_wbarb_io_in_0_bits_data_rawIn_sig[51:0]; // @[recFNFromFN.scala:51:22]
wire [64:0] _ll_wbarb_io_in_0_bits_data_T_12 = {_ll_wbarb_io_in_0_bits_data_T_10, _ll_wbarb_io_in_0_bits_data_T_11}; // @[recFNFromFN.scala:49:45, :50:41, :51:22]
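// Repeat the rawFloatFromFN decomposition on the single-precision (32-bit) view of the same
// boxed data; the appropriate recoding is selected later in the module based on the load size.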
wire ll_wbarb_io_in_0_bits_data_rawIn_sign_1 = _ll_wbarb_io_in_0_bits_data_T_3[31]; // @[FPU.scala:431:23]
wire ll_wbarb_io_in_0_bits_data_rawIn_1_sign = ll_wbarb_io_in_0_bits_data_rawIn_sign_1; // @[rawFloatFromFN.scala:44:18, :63:19]
wire [7:0] ll_wbarb_io_in_0_bits_data_rawIn_expIn_1 = _ll_wbarb_io_in_0_bits_data_T_3[30:23]; // @[FPU.scala:431:23]
wire [22:0] ll_wbarb_io_in_0_bits_data_rawIn_fractIn_1 = _ll_wbarb_io_in_0_bits_data_T_3[22:0]; // @[FPU.scala:431:23]
wire ll_wbarb_io_in_0_bits_data_rawIn_isZeroExpIn_1 = ll_wbarb_io_in_0_bits_data_rawIn_expIn_1 == 8'h0; // @[rawFloatFromFN.scala:45:19, :48:30]
wire ll_wbarb_io_in_0_bits_data_rawIn_isZeroFractIn_1 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn_1 == 23'h0; // @[rawFloatFromFN.scala:46:21, :49:34]
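// Leading-zero count of the 23-bit single-precision fraction, using the same priority-mux
// structure as the double-precision path above.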
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_102 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn_1[0]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_103 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn_1[1]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_104 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn_1[2]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_105 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn_1[3]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_106 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn_1[4]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_107 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn_1[5]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_108 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn_1[6]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_109 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn_1[7]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_110 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn_1[8]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_111 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn_1[9]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_112 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn_1[10]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_113 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn_1[11]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_114 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn_1[12]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_115 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn_1[13]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_116 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn_1[14]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_117 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn_1[15]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_118 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn_1[16]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_119 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn_1[17]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_120 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn_1[18]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_121 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn_1[19]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_122 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn_1[20]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_123 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn_1[21]; // @[rawFloatFromFN.scala:46:21]
wire _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_124 = ll_wbarb_io_in_0_bits_data_rawIn_fractIn_1[22]; // @[rawFloatFromFN.scala:46:21]
wire [4:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_125 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_103 ? 5'h15 : 5'h16; // @[Mux.scala:50:70]
wire [4:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_126 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_104 ? 5'h14 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_125; // @[Mux.scala:50:70]
wire [4:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_127 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_105 ? 5'h13 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_126; // @[Mux.scala:50:70]
wire [4:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_128 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_106 ? 5'h12 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_127; // @[Mux.scala:50:70]
wire [4:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_129 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_107 ? 5'h11 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_128; // @[Mux.scala:50:70]
wire [4:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_130 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_108 ? 5'h10 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_129; // @[Mux.scala:50:70]
wire [4:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_131 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_109 ? 5'hF : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_130; // @[Mux.scala:50:70]
wire [4:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_132 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_110 ? 5'hE : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_131; // @[Mux.scala:50:70]
wire [4:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_133 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_111 ? 5'hD : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_132; // @[Mux.scala:50:70]
wire [4:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_134 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_112 ? 5'hC : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_133; // @[Mux.scala:50:70]
wire [4:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_135 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_113 ? 5'hB : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_134; // @[Mux.scala:50:70]
wire [4:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_136 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_114 ? 5'hA : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_135; // @[Mux.scala:50:70]
wire [4:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_137 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_115 ? 5'h9 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_136; // @[Mux.scala:50:70]
wire [4:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_138 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_116 ? 5'h8 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_137; // @[Mux.scala:50:70]
wire [4:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_139 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_117 ? 5'h7 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_138; // @[Mux.scala:50:70]
wire [4:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_140 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_118 ? 5'h6 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_139; // @[Mux.scala:50:70]
wire [4:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_141 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_119 ? 5'h5 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_140; // @[Mux.scala:50:70]
wire [4:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_142 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_120 ? 5'h4 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_141; // @[Mux.scala:50:70]
wire [4:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_143 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_121 ? 5'h3 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_142; // @[Mux.scala:50:70]
wire [4:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_144 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_122 ? 5'h2 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_143; // @[Mux.scala:50:70]
wire [4:0] _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_145 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_123 ? 5'h1 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_144; // @[Mux.scala:50:70]
wire [4:0] ll_wbarb_io_in_0_bits_data_rawIn_normDist_1 = _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_124 ? 5'h0 : _ll_wbarb_io_in_0_bits_data_rawIn_normDist_T_145; // @[Mux.scala:50:70]
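  // The mux chain above acts as a priority encoder over fractIn_1: normDist_1
  // is the leading-zero count of the 23-bit subnormal fraction and is used
  // below as the left-shift amount that normalizes it (rawFloatFromFN).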
wire [53:0] _ll_wbarb_io_in_0_bits_data_rawIn_subnormFract_T_2 = {31'h0, ll_wbarb_io_in_0_bits_data_rawIn_fractIn_1} << ll_wbarb_io_in_0_bits_data_rawIn_normDist_1; // @[Mux.scala:50:70]
wire [21:0] _ll_wbarb_io_in_0_bits_data_rawIn_subnormFract_T_3 = _ll_wbarb_io_in_0_bits_data_rawIn_subnormFract_T_2[21:0]; // @[rawFloatFromFN.scala:52:{33,46}]
wire [22:0] ll_wbarb_io_in_0_bits_data_rawIn_subnormFract_1 = {_ll_wbarb_io_in_0_bits_data_rawIn_subnormFract_T_3, 1'h0}; // @[rawFloatFromFN.scala:52:{46,64}]
wire [8:0] _ll_wbarb_io_in_0_bits_data_rawIn_adjustedExp_T_5 = {4'hF, ~ll_wbarb_io_in_0_bits_data_rawIn_normDist_1}; // @[Mux.scala:50:70]
wire [8:0] _ll_wbarb_io_in_0_bits_data_rawIn_adjustedExp_T_6 = ll_wbarb_io_in_0_bits_data_rawIn_isZeroExpIn_1 ? _ll_wbarb_io_in_0_bits_data_rawIn_adjustedExp_T_5 : {1'h0, ll_wbarb_io_in_0_bits_data_rawIn_expIn_1}; // @[rawFloatFromFN.scala:45:19, :48:30, :54:10, :55:18]
wire [1:0] _ll_wbarb_io_in_0_bits_data_rawIn_adjustedExp_T_7 = ll_wbarb_io_in_0_bits_data_rawIn_isZeroExpIn_1 ? 2'h2 : 2'h1; // @[rawFloatFromFN.scala:48:30, :58:14]
wire [7:0] _ll_wbarb_io_in_0_bits_data_rawIn_adjustedExp_T_8 = {6'h20, _ll_wbarb_io_in_0_bits_data_rawIn_adjustedExp_T_7}; // @[rawFloatFromFN.scala:58:{9,14}]
wire [9:0] _ll_wbarb_io_in_0_bits_data_rawIn_adjustedExp_T_9 = {1'h0, _ll_wbarb_io_in_0_bits_data_rawIn_adjustedExp_T_6} + {2'h0, _ll_wbarb_io_in_0_bits_data_rawIn_adjustedExp_T_8}; // @[rawFloatFromFN.scala:54:10, :57:9, :58:9]
wire [8:0] ll_wbarb_io_in_0_bits_data_rawIn_adjustedExp_1 = _ll_wbarb_io_in_0_bits_data_rawIn_adjustedExp_T_9[8:0]; // @[rawFloatFromFN.scala:57:9]
wire [8:0] _ll_wbarb_io_in_0_bits_data_rawIn_out_sExp_T_2 = ll_wbarb_io_in_0_bits_data_rawIn_adjustedExp_1; // @[rawFloatFromFN.scala:57:9, :68:28]
wire ll_wbarb_io_in_0_bits_data_rawIn_isZero_1 = ll_wbarb_io_in_0_bits_data_rawIn_isZeroExpIn_1 & ll_wbarb_io_in_0_bits_data_rawIn_isZeroFractIn_1; // @[rawFloatFromFN.scala:48:30, :49:34, :60:30]
wire ll_wbarb_io_in_0_bits_data_rawIn_1_isZero = ll_wbarb_io_in_0_bits_data_rawIn_isZero_1; // @[rawFloatFromFN.scala:60:30, :63:19]
wire [1:0] _ll_wbarb_io_in_0_bits_data_rawIn_isSpecial_T_1 = ll_wbarb_io_in_0_bits_data_rawIn_adjustedExp_1[8:7]; // @[rawFloatFromFN.scala:57:9, :61:32]
wire ll_wbarb_io_in_0_bits_data_rawIn_isSpecial_1 = &_ll_wbarb_io_in_0_bits_data_rawIn_isSpecial_T_1; // @[rawFloatFromFN.scala:61:{32,57}]
wire _ll_wbarb_io_in_0_bits_data_rawIn_out_isNaN_T_3; // @[rawFloatFromFN.scala:64:28]
wire _ll_wbarb_io_in_0_bits_data_rawIn_out_isInf_T_1; // @[rawFloatFromFN.scala:65:28]
wire _ll_wbarb_io_in_0_bits_data_T_15 = ll_wbarb_io_in_0_bits_data_rawIn_1_isNaN; // @[recFNFromFN.scala:49:20]
wire [9:0] _ll_wbarb_io_in_0_bits_data_rawIn_out_sExp_T_3; // @[rawFloatFromFN.scala:68:42]
wire [24:0] _ll_wbarb_io_in_0_bits_data_rawIn_out_sig_T_7; // @[rawFloatFromFN.scala:70:27]
wire ll_wbarb_io_in_0_bits_data_rawIn_1_isInf; // @[rawFloatFromFN.scala:63:19]
wire [9:0] ll_wbarb_io_in_0_bits_data_rawIn_1_sExp; // @[rawFloatFromFN.scala:63:19]
wire [24:0] ll_wbarb_io_in_0_bits_data_rawIn_1_sig; // @[rawFloatFromFN.scala:63:19]
wire _ll_wbarb_io_in_0_bits_data_rawIn_out_isNaN_T_2 = ~ll_wbarb_io_in_0_bits_data_rawIn_isZeroFractIn_1; // @[rawFloatFromFN.scala:49:34, :64:31]
assign _ll_wbarb_io_in_0_bits_data_rawIn_out_isNaN_T_3 = ll_wbarb_io_in_0_bits_data_rawIn_isSpecial_1 & _ll_wbarb_io_in_0_bits_data_rawIn_out_isNaN_T_2; // @[rawFloatFromFN.scala:61:57, :64:{28,31}]
assign ll_wbarb_io_in_0_bits_data_rawIn_1_isNaN = _ll_wbarb_io_in_0_bits_data_rawIn_out_isNaN_T_3; // @[rawFloatFromFN.scala:63:19, :64:28]
assign _ll_wbarb_io_in_0_bits_data_rawIn_out_isInf_T_1 = ll_wbarb_io_in_0_bits_data_rawIn_isSpecial_1 & ll_wbarb_io_in_0_bits_data_rawIn_isZeroFractIn_1; // @[rawFloatFromFN.scala:49:34, :61:57, :65:28]
assign ll_wbarb_io_in_0_bits_data_rawIn_1_isInf = _ll_wbarb_io_in_0_bits_data_rawIn_out_isInf_T_1; // @[rawFloatFromFN.scala:63:19, :65:28]
assign _ll_wbarb_io_in_0_bits_data_rawIn_out_sExp_T_3 = {1'h0, _ll_wbarb_io_in_0_bits_data_rawIn_out_sExp_T_2}; // @[rawFloatFromFN.scala:68:{28,42}]
assign ll_wbarb_io_in_0_bits_data_rawIn_1_sExp = _ll_wbarb_io_in_0_bits_data_rawIn_out_sExp_T_3; // @[rawFloatFromFN.scala:63:19, :68:42]
wire _ll_wbarb_io_in_0_bits_data_rawIn_out_sig_T_4 = ~ll_wbarb_io_in_0_bits_data_rawIn_isZero_1; // @[rawFloatFromFN.scala:60:30, :70:19]
wire [1:0] _ll_wbarb_io_in_0_bits_data_rawIn_out_sig_T_5 = {1'h0, _ll_wbarb_io_in_0_bits_data_rawIn_out_sig_T_4}; // @[rawFloatFromFN.scala:70:{16,19}]
wire [22:0] _ll_wbarb_io_in_0_bits_data_rawIn_out_sig_T_6 = ll_wbarb_io_in_0_bits_data_rawIn_isZeroExpIn_1 ? ll_wbarb_io_in_0_bits_data_rawIn_subnormFract_1 : ll_wbarb_io_in_0_bits_data_rawIn_fractIn_1; // @[rawFloatFromFN.scala:46:21, :48:30, :52:64, :70:33]
assign _ll_wbarb_io_in_0_bits_data_rawIn_out_sig_T_7 = {_ll_wbarb_io_in_0_bits_data_rawIn_out_sig_T_5, _ll_wbarb_io_in_0_bits_data_rawIn_out_sig_T_6}; // @[rawFloatFromFN.scala:70:{16,27,33}]
assign ll_wbarb_io_in_0_bits_data_rawIn_1_sig = _ll_wbarb_io_in_0_bits_data_rawIn_out_sig_T_7; // @[rawFloatFromFN.scala:63:19, :70:27]
wire [2:0] _ll_wbarb_io_in_0_bits_data_T_13 = ll_wbarb_io_in_0_bits_data_rawIn_1_sExp[8:6]; // @[recFNFromFN.scala:48:50]
wire [2:0] _ll_wbarb_io_in_0_bits_data_T_14 = ll_wbarb_io_in_0_bits_data_rawIn_1_isZero ? 3'h0 : _ll_wbarb_io_in_0_bits_data_T_13; // @[recFNFromFN.scala:48:{15,50}]
wire [2:0] _ll_wbarb_io_in_0_bits_data_T_16 = {_ll_wbarb_io_in_0_bits_data_T_14[2:1], _ll_wbarb_io_in_0_bits_data_T_14[0] | _ll_wbarb_io_in_0_bits_data_T_15}; // @[recFNFromFN.scala:48:{15,76}, :49:20]
wire [3:0] _ll_wbarb_io_in_0_bits_data_T_17 = {ll_wbarb_io_in_0_bits_data_rawIn_1_sign, _ll_wbarb_io_in_0_bits_data_T_16}; // @[recFNFromFN.scala:47:20, :48:76]
wire [5:0] _ll_wbarb_io_in_0_bits_data_T_18 = ll_wbarb_io_in_0_bits_data_rawIn_1_sExp[5:0]; // @[recFNFromFN.scala:50:23]
wire [9:0] _ll_wbarb_io_in_0_bits_data_T_19 = {_ll_wbarb_io_in_0_bits_data_T_17, _ll_wbarb_io_in_0_bits_data_T_18}; // @[recFNFromFN.scala:47:20, :49:45, :50:23]
wire [22:0] _ll_wbarb_io_in_0_bits_data_T_20 = ll_wbarb_io_in_0_bits_data_rawIn_1_sig[22:0]; // @[recFNFromFN.scala:51:22]
wire [32:0] _ll_wbarb_io_in_0_bits_data_T_21 = {_ll_wbarb_io_in_0_bits_data_T_19, _ll_wbarb_io_in_0_bits_data_T_20}; // @[recFNFromFN.scala:49:45, :50:41, :51:22]
wire [3:0] _ll_wbarb_io_in_0_bits_data_swizzledNaN_T = _ll_wbarb_io_in_0_bits_data_T_12[64:61]; // @[FPU.scala:337:8]
wire [19:0] _ll_wbarb_io_in_0_bits_data_swizzledNaN_T_1 = _ll_wbarb_io_in_0_bits_data_T_12[51:32]; // @[FPU.scala:338:8]
wire [19:0] _ll_wbarb_io_in_0_bits_data_swizzledNaN_T_5 = _ll_wbarb_io_in_0_bits_data_T_12[51:32]; // @[FPU.scala:338:8, :341:8]
wire _ll_wbarb_io_in_0_bits_data_swizzledNaN_T_2 = &_ll_wbarb_io_in_0_bits_data_swizzledNaN_T_1; // @[FPU.scala:338:{8,42}]
wire [6:0] _ll_wbarb_io_in_0_bits_data_swizzledNaN_T_3 = _ll_wbarb_io_in_0_bits_data_T_12[59:53]; // @[FPU.scala:339:8]
wire _ll_wbarb_io_in_0_bits_data_swizzledNaN_T_4 = _ll_wbarb_io_in_0_bits_data_T_21[31]; // @[FPU.scala:340:8]
wire _ll_wbarb_io_in_0_bits_data_swizzledNaN_T_6 = _ll_wbarb_io_in_0_bits_data_T_21[32]; // @[FPU.scala:342:8]
wire [30:0] _ll_wbarb_io_in_0_bits_data_swizzledNaN_T_7 = _ll_wbarb_io_in_0_bits_data_T_21[30:0]; // @[FPU.scala:343:8]
wire [20:0] ll_wbarb_io_in_0_bits_data_swizzledNaN_lo_hi = {_ll_wbarb_io_in_0_bits_data_swizzledNaN_T_5, _ll_wbarb_io_in_0_bits_data_swizzledNaN_T_6}; // @[FPU.scala:336:26, :341:8, :342:8]
wire [51:0] ll_wbarb_io_in_0_bits_data_swizzledNaN_lo = {ll_wbarb_io_in_0_bits_data_swizzledNaN_lo_hi, _ll_wbarb_io_in_0_bits_data_swizzledNaN_T_7}; // @[FPU.scala:336:26, :343:8]
wire [7:0] ll_wbarb_io_in_0_bits_data_swizzledNaN_hi_lo = {_ll_wbarb_io_in_0_bits_data_swizzledNaN_T_3, _ll_wbarb_io_in_0_bits_data_swizzledNaN_T_4}; // @[FPU.scala:336:26, :339:8, :340:8]
wire [4:0] ll_wbarb_io_in_0_bits_data_swizzledNaN_hi_hi = {_ll_wbarb_io_in_0_bits_data_swizzledNaN_T, _ll_wbarb_io_in_0_bits_data_swizzledNaN_T_2}; // @[FPU.scala:336:26, :337:8, :338:42]
wire [12:0] ll_wbarb_io_in_0_bits_data_swizzledNaN_hi = {ll_wbarb_io_in_0_bits_data_swizzledNaN_hi_hi, ll_wbarb_io_in_0_bits_data_swizzledNaN_hi_lo}; // @[FPU.scala:336:26]
wire [64:0] ll_wbarb_io_in_0_bits_data_swizzledNaN = {ll_wbarb_io_in_0_bits_data_swizzledNaN_hi, ll_wbarb_io_in_0_bits_data_swizzledNaN_lo}; // @[FPU.scala:336:26]
wire [2:0] _ll_wbarb_io_in_0_bits_data_T_22 = _ll_wbarb_io_in_0_bits_data_T_12[63:61]; // @[FPU.scala:249:25]
wire _ll_wbarb_io_in_0_bits_data_T_23 = &_ll_wbarb_io_in_0_bits_data_T_22; // @[FPU.scala:249:{25,56}]
wire [64:0] _ll_wbarb_io_in_0_bits_data_T_24 = _ll_wbarb_io_in_0_bits_data_T_23 ? ll_wbarb_io_in_0_bits_data_swizzledNaN : _ll_wbarb_io_in_0_bits_data_T_12; // @[FPU.scala:249:56, :336:26, :344:8]
wire _fregfile_io_write_ports_0_wport_valid_T_1; // @[regfile.scala:57:35]
wire [6:0] fregfile_io_write_ports_0_wport_bits_addr; // @[regfile.scala:55:22]
wire [64:0] fregfile_io_write_ports_0_wport_bits_data; // @[regfile.scala:55:22]
wire fregfile_io_write_ports_0_wport_valid; // @[regfile.scala:55:22]
wire _fregfile_io_write_ports_0_wport_valid_T = _ll_wbarb_io_out_bits_uop_dst_rtype == 2'h1; // @[regfile.scala:57:61]
assign _fregfile_io_write_ports_0_wport_valid_T_1 = _ll_wbarb_io_out_valid & _fregfile_io_write_ports_0_wport_valid_T; // @[regfile.scala:57:{35,61}]
assign fregfile_io_write_ports_0_wport_valid = _fregfile_io_write_ports_0_wport_valid_T_1; // @[regfile.scala:55:22, :57:35]
reg fregfile_io_write_ports_0_REG_valid; // @[fp-pipeline.scala:185:40]
reg [6:0] fregfile_io_write_ports_0_REG_bits_addr; // @[fp-pipeline.scala:185:40]
reg [64:0] fregfile_io_write_ports_0_REG_bits_data; // @[fp-pipeline.scala:185:40]
wire _fregfile_io_write_ports_1_valid_T = _fpu_exe_unit_io_fresp_bits_uop_dst_rtype != 2'h2; // @[execution-units.scala:131:32]
wire _fregfile_io_write_ports_1_valid_T_1 = _fpu_exe_unit_io_fresp_valid & _fregfile_io_write_ports_1_valid_T; // @[fp-pipeline.scala:199:69]
wire _io_wakeups_1_valid_T = _fpu_exe_unit_io_fresp_bits_uop_dst_rtype == 2'h1; // @[fp-pipeline.scala:206:{15,47}, :243:57]
wire fpiu_is_sdq = _fpu_exe_unit_io_ll_iresp_bits_uop_uopc == 7'h2; // @[fp-pipeline.scala:214:57]
wire _fpu_exe_unit_io_ll_iresp_ready_T; // @[fp-pipeline.scala:219:50]
wire _GEN = _fpu_exe_unit_io_ll_iresp_ready_T & _fpu_exe_unit_io_ll_iresp_valid; // @[Decoupled.scala:51:35]
wire _io_to_int_valid_T; // @[Decoupled.scala:51:35]
assign _io_to_int_valid_T = _GEN; // @[Decoupled.scala:51:35]
wire _io_to_sdq_valid_T; // @[Decoupled.scala:51:35]
assign _io_to_sdq_valid_T = _GEN; // @[Decoupled.scala:51:35]
wire _io_to_int_valid_T_1 = ~fpiu_is_sdq; // @[fp-pipeline.scala:214:57, :215:52]
assign _io_to_int_valid_T_2 = _io_to_int_valid_T & _io_to_int_valid_T_1; // @[Decoupled.scala:51:35]
assign io_to_int_valid_0 = _io_to_int_valid_T_2; // @[fp-pipeline.scala:28:7, :215:49]
assign _io_to_sdq_valid_T_1 = _io_to_sdq_valid_T & fpiu_is_sdq; // @[Decoupled.scala:51:35]
assign io_to_sdq_valid_0 = _io_to_sdq_valid_T_1; // @[fp-pipeline.scala:28:7, :216:49]
assign io_to_sdq_bits_data_0 = _fpu_exe_unit_io_ll_iresp_bits_data[63:0]; // @[fp-pipeline.scala:28:7, :217:19]
assign io_to_int_bits_data_0 = _fpu_exe_unit_io_ll_iresp_bits_data[63:0]; // @[fp-pipeline.scala:28:7, :217:19]
assign _fpu_exe_unit_io_ll_iresp_ready_T = io_to_sdq_ready_0 & io_to_int_ready_0; // @[fp-pipeline.scala:28:7, :219:50]
assign _io_wakeups_1_valid_T_1 = _fpu_exe_unit_io_fresp_valid & _io_wakeups_1_valid_T; // @[fp-pipeline.scala:243:{37,57}]
assign io_wakeups_1_valid_0 = _io_wakeups_1_valid_T_1; // @[fp-pipeline.scala:28:7, :243:37] |
Generate the Verilog code corresponding to the following Chisel files.
File RoundAnyRawFNToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util.Fill
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundAnyRawFNToRecFN(
inExpWidth: Int,
inSigWidth: Int,
outExpWidth: Int,
outSigWidth: Int,
options: Int
)
extends RawModule
{
override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(inExpWidth, inSigWidth))
// (allowed exponent range has limits)
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0)
val effectiveInSigWidth =
if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1
val neverUnderflows =
((options &
(flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact)
) != 0) ||
(inExpWidth < outExpWidth)
val neverOverflows =
((options & flRoundOpt_neverOverflows) != 0) ||
(inExpWidth < outExpWidth)
val outNaNExp = BigInt(7)<<(outExpWidth - 2)
val outInfExp = BigInt(6)<<(outExpWidth - 2)
val outMaxFiniteExp = outInfExp - 1
val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2
val outMinNonzeroExp = outMinNormExp - outSigWidth + 1
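    // For illustration: with outExpWidth = 8 and outSigWidth = 24 (recoded
    // single precision), outNaNExp = 448, outInfExp = 384,
    // outMaxFiniteExp = 383, outMinNormExp = 130, and outMinNonzeroExp = 107.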
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_near_even = (io.roundingMode === round_near_even)
val roundingMode_minMag = (io.roundingMode === round_minMag)
val roundingMode_min = (io.roundingMode === round_min)
val roundingMode_max = (io.roundingMode === round_max)
val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag)
val roundingMode_odd = (io.roundingMode === round_odd)
val roundMagUp =
(roundingMode_min && io.in.sign) || (roundingMode_max && ! io.in.sign)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sAdjustedExp =
if (inExpWidth < outExpWidth)
(io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
)(outExpWidth, 0).zext
else if (inExpWidth == outExpWidth)
io.in.sExp
else
io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
val adjustedSig =
if (inSigWidth <= outSigWidth + 2)
io.in.sig<<(outSigWidth - inSigWidth + 2)
else
(io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ##
io.in.sig(inSigWidth - outSigWidth - 2, 0).orR
)
val doShiftSigDown1 =
if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2)
val common_expOut = Wire(UInt((outExpWidth + 1).W))
val common_fractOut = Wire(UInt((outSigWidth - 1).W))
val common_overflow = Wire(Bool())
val common_totalUnderflow = Wire(Bool())
val common_underflow = Wire(Bool())
val common_inexact = Wire(Bool())
if (
neverOverflows && neverUnderflows
&& (effectiveInSigWidth <= outSigWidth)
) {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1
common_fractOut :=
Mux(doShiftSigDown1,
adjustedSig(outSigWidth + 1, 3),
adjustedSig(outSigWidth, 2)
)
common_overflow := false.B
common_totalUnderflow := false.B
common_underflow := false.B
common_inexact := false.B
} else {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
val roundMask =
if (neverUnderflows)
0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W)
else
(lowMask(
sAdjustedExp(outExpWidth, 0),
outMinNormExp - outSigWidth - 1,
outMinNormExp
) | doShiftSigDown1) ##
3.U(2.W)
val shiftedRoundMask = 0.U(1.W) ## roundMask>>1
val roundPosMask = ~shiftedRoundMask & roundMask
val roundPosBit = (adjustedSig & roundPosMask).orR
val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR
val anyRound = roundPosBit || anyRoundExtra
val roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
roundPosBit) ||
(roundMagUp && anyRound)
val roundedSig: Bits =
Mux(roundIncr,
(((adjustedSig | roundMask)>>2) +& 1.U) &
~Mux(roundingMode_near_even && roundPosBit &&
! anyRoundExtra,
roundMask>>1,
0.U((outSigWidth + 2).W)
),
(adjustedSig & ~roundMask)>>2 |
Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U)
)
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext
common_expOut := sRoundedExp(outExpWidth, 0)
common_fractOut :=
Mux(doShiftSigDown1,
roundedSig(outSigWidth - 1, 1),
roundedSig(outSigWidth - 2, 0)
)
common_overflow :=
(if (neverOverflows) false.B else
//*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?:
(sRoundedExp>>(outExpWidth - 1) >= 3.S))
common_totalUnderflow :=
(if (neverUnderflows) false.B else
//*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?:
(sRoundedExp < outMinNonzeroExp.S))
val unboundedRange_roundPosBit =
Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1))
val unboundedRange_anyRound =
(doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR
val unboundedRange_roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
unboundedRange_roundPosBit) ||
(roundMagUp && unboundedRange_anyRound)
val roundCarry =
Mux(doShiftSigDown1,
roundedSig(outSigWidth + 1),
roundedSig(outSigWidth)
)
common_underflow :=
(if (neverUnderflows) false.B else
common_totalUnderflow ||
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
(anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) &&
Mux(doShiftSigDown1, roundMask(3), roundMask(2)) &&
! ((io.detectTininess === tininess_afterRounding) &&
! Mux(doShiftSigDown1,
roundMask(4),
roundMask(3)
) &&
roundCarry && roundPosBit &&
unboundedRange_roundIncr)))
common_inexact := common_totalUnderflow || anyRound
}
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val isNaNOut = io.invalidExc || io.in.isNaN
val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf
val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero
val overflow = commonCase && common_overflow
val underflow = commonCase && common_underflow
val inexact = overflow || (commonCase && common_inexact)
val overflow_roundMagUp =
roundingMode_near_even || roundingMode_near_maxMag || roundMagUp
val pegMinNonzeroMagOut =
commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd)
val pegMaxFiniteMagOut = overflow && ! overflow_roundMagUp
val notNaN_isInfOut =
notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp)
val signOut = Mux(isNaNOut, false.B, io.in.sign)
val expOut =
(common_expOut &
~Mux(io.in.isZero || common_totalUnderflow,
(BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMinNonzeroMagOut,
~outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMaxFiniteMagOut,
(BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W),
0.U
) &
~Mux(notNaN_isInfOut,
(BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
)) |
Mux(pegMinNonzeroMagOut,
outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) |
Mux(pegMaxFiniteMagOut,
outMaxFiniteExp.U((outExpWidth + 1).W),
0.U
) |
Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) |
Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U)
val fractOut =
Mux(isNaNOut || io.in.isZero || common_totalUnderflow,
Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U),
common_fractOut
) |
Fill(outSigWidth - 1, pegMaxFiniteMagOut)
io.out := signOut ## expOut ## fractOut
io.exceptionFlags :=
io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int)
extends RawModule
{
override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(expWidth, sigWidth + 2))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
expWidth, sigWidth + 2, expWidth, sigWidth, options))
roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc
roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc
roundAnyRawFNToRecFN.io.in := io.in
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
| module RoundAnyRawFNToRecFN_ie2_is1_oe8_os24_12(); // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [8:0] _expOut_T_4 = 9'h194; // @[RoundAnyRawFNToRecFN.scala:258:19]
wire [26:0] adjustedSig = 27'h2000000; // @[RoundAnyRawFNToRecFN.scala:114:22]
wire [22:0] _common_fractOut_T = 23'h400000; // @[RoundAnyRawFNToRecFN.scala:139:28]
wire [8:0] _expOut_T_2 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:253:14, :257:14, :261:14, :265:14]
wire [8:0] _expOut_T_6 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:253:14, :257:14, :261:14, :265:14]
wire [8:0] _expOut_T_9 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:253:14, :257:14, :261:14, :265:14]
wire [8:0] _expOut_T_12 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:253:14, :257:14, :261:14, :265:14]
wire [8:0] _expOut_T_1 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:18, :257:18, :261:18, :265:18, :269:16, :273:16, :277:16, :278:16]
wire [8:0] _expOut_T_5 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:18, :257:18, :261:18, :265:18, :269:16, :273:16, :277:16, :278:16]
wire [8:0] _expOut_T_8 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:18, :257:18, :261:18, :265:18, :269:16, :273:16, :277:16, :278:16]
wire [8:0] _expOut_T_11 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:18, :257:18, :261:18, :265:18, :269:16, :273:16, :277:16, :278:16]
wire [8:0] _expOut_T_14 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:18, :257:18, :261:18, :265:18, :269:16, :273:16, :277:16, :278:16]
wire [8:0] _expOut_T_16 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:18, :257:18, :261:18, :265:18, :269:16, :273:16, :277:16, :278:16]
wire [8:0] _expOut_T_18 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:18, :257:18, :261:18, :265:18, :269:16, :273:16, :277:16, :278:16]
wire [8:0] _expOut_T_20 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:18, :257:18, :261:18, :265:18, :269:16, :273:16, :277:16, :278:16]
wire [8:0] _sAdjustedExp_T_1 = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73]
wire [8:0] common_expOut = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73]
wire [8:0] _common_expOut_T = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73]
wire [8:0] _common_expOut_T_2 = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73]
wire [8:0] _expOut_T_3 = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73]
wire [8:0] _expOut_T_7 = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73]
wire [8:0] _expOut_T_10 = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73]
wire [8:0] _expOut_T_13 = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73]
wire [8:0] _expOut_T_15 = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73]
wire [8:0] _expOut_T_17 = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73]
wire [8:0] _expOut_T_19 = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73]
wire [8:0] expOut = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73]
wire [22:0] common_fractOut = 23'h0; // @[RoundAnyRawFNToRecFN.scala:123:31, :138:16, :140:28, :280:12, :281:16, :283:11, :284:13]
wire [22:0] _common_fractOut_T_1 = 23'h0; // @[RoundAnyRawFNToRecFN.scala:123:31, :138:16, :140:28, :280:12, :281:16, :283:11, :284:13]
wire [22:0] _common_fractOut_T_2 = 23'h0; // @[RoundAnyRawFNToRecFN.scala:123:31, :138:16, :140:28, :280:12, :281:16, :283:11, :284:13]
wire [22:0] _fractOut_T_2 = 23'h0; // @[RoundAnyRawFNToRecFN.scala:123:31, :138:16, :140:28, :280:12, :281:16, :283:11, :284:13]
wire [22:0] _fractOut_T_3 = 23'h0; // @[RoundAnyRawFNToRecFN.scala:123:31, :138:16, :140:28, :280:12, :281:16, :283:11, :284:13]
wire [22:0] _fractOut_T_4 = 23'h0; // @[RoundAnyRawFNToRecFN.scala:123:31, :138:16, :140:28, :280:12, :281:16, :283:11, :284:13]
wire [22:0] fractOut = 23'h0; // @[RoundAnyRawFNToRecFN.scala:123:31, :138:16, :140:28, :280:12, :281:16, :283:11, :284:13]
wire [9:0] _sAdjustedExp_T = 10'h100; // @[RoundAnyRawFNToRecFN.scala:104:25, :136:55, :286:23]
wire [9:0] sAdjustedExp = 10'h100; // @[RoundAnyRawFNToRecFN.scala:106:31, :136:55, :286:23]
wire [9:0] _common_expOut_T_1 = 10'h100; // @[RoundAnyRawFNToRecFN.scala:136:55, :286:23]
wire [9:0] _io_out_T = 10'h100; // @[RoundAnyRawFNToRecFN.scala:136:55, :286:23]
wire [1:0] _io_exceptionFlags_T = 2'h0; // @[RoundAnyRawFNToRecFN.scala:288:23]
wire [3:0] _io_exceptionFlags_T_2 = 4'h0; // @[RoundAnyRawFNToRecFN.scala:288:53]
wire [4:0] io_exceptionFlags = 5'h0; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :288:66]
wire [4:0] _io_exceptionFlags_T_3 = 5'h0; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :288:66]
wire [32:0] io_out = 33'h80000000; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :286:33]
wire [32:0] _io_out_T_1 = 33'h80000000; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :286:33]
wire io_detectTininess = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :90:53, :98:66, :237:{22,33,36,61,64}, :243:{32,60}]
wire roundingMode_near_even = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :90:53, :98:66, :237:{22,33,36,61,64}, :243:{32,60}]
wire _roundMagUp_T_1 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :90:53, :98:66, :237:{22,33,36,61,64}, :243:{32,60}]
wire _commonCase_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :90:53, :98:66, :237:{22,33,36,61,64}, :243:{32,60}]
wire _commonCase_T_1 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :90:53, :98:66, :237:{22,33,36,61,64}, :243:{32,60}]
wire _commonCase_T_2 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :90:53, :98:66, :237:{22,33,36,61,64}, :243:{32,60}]
wire _commonCase_T_3 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :90:53, :98:66, :237:{22,33,36,61,64}, :243:{32,60}]
wire commonCase = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :90:53, :98:66, :237:{22,33,36,61,64}, :243:{32,60}]
wire _overflow_roundMagUp_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :90:53, :98:66, :237:{22,33,36,61,64}, :243:{32,60}]
wire overflow_roundMagUp = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :90:53, :98:66, :237:{22,33,36,61,64}, :243:{32,60}]
wire [2:0] io_roundingMode = 3'h0; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :288:41]
wire [2:0] _io_exceptionFlags_T_1 = 3'h0; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :288:41]
wire [1:0] io_in_sig = 2'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16]
wire [3:0] io_in_sExp = 4'h4; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16]
wire io_invalidExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_infiniteExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_isNaN = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_isInf = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_isZero = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_sign = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire roundingMode_minMag = 1'h0; // @[RoundAnyRawFNToRecFN.scala:91:53]
wire roundingMode_min = 1'h0; // @[RoundAnyRawFNToRecFN.scala:92:53]
wire roundingMode_max = 1'h0; // @[RoundAnyRawFNToRecFN.scala:93:53]
wire roundingMode_near_maxMag = 1'h0; // @[RoundAnyRawFNToRecFN.scala:94:53]
wire roundingMode_odd = 1'h0; // @[RoundAnyRawFNToRecFN.scala:95:53]
wire _roundMagUp_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:27]
wire _roundMagUp_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:63]
wire roundMagUp = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:42]
wire common_overflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:124:37]
wire common_totalUnderflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:125:37]
wire common_underflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:126:37]
wire common_inexact = 1'h0; // @[RoundAnyRawFNToRecFN.scala:127:37]
wire isNaNOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:235:34]
wire notNaN_isSpecialInfOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:236:49]
wire overflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:238:32]
wire underflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:239:32]
wire _inexact_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:240:43]
wire inexact = 1'h0; // @[RoundAnyRawFNToRecFN.scala:240:28]
wire _pegMinNonzeroMagOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:20]
wire _pegMinNonzeroMagOut_T_1 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:60]
wire pegMinNonzeroMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:45]
wire _pegMaxFiniteMagOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:246:42]
wire pegMaxFiniteMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:246:39]
wire _notNaN_isInfOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:248:45]
wire notNaN_isInfOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:248:32]
wire signOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:250:22]
wire _expOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:253:32]
wire _fractOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:280:22]
wire _fractOut_T_1 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:280:38]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File util.scala:
//******************************************************************************
// Copyright (c) 2015 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Utility Functions
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v3.util
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util.{Str}
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.tile.{TileKey}
import boom.v3.common.{MicroOp}
import boom.v3.exu.{BrUpdateInfo}
/**
 * Object to XOR fold an input register of fullLength into a compressedLength.
*/
object Fold
{
def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = {
val clen = compressedLength
val hlen = fullLength
if (hlen <= clen) {
input
} else {
var res = 0.U(clen.W)
var remaining = input.asUInt
for (i <- 0 to hlen-1 by clen) {
val len = if (i + clen > hlen ) (hlen - i) else clen
require(len > 0)
res = res(clen-1,0) ^ remaining(len-1,0)
remaining = remaining >> len.U
}
res
}
}
}
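// Illustrative example (derived from the code above): Fold("hABCD".U, 4, 16)
// XORs the four nibbles together LSB-first, i.e. 0xD ^ 0xC ^ 0xB ^ 0xA.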
/**
* Object to check if MicroOp was killed due to a branch mispredict.
* Uses "Fast" branch masks
*/
object IsKilledByBranch
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): Bool = {
return maskMatch(brupdate.b1.mispredict_mask, uop.br_mask)
}
def apply(brupdate: BrUpdateInfo, uop_mask: UInt): Bool = {
return maskMatch(brupdate.b1.mispredict_mask, uop_mask)
}
}
/**
* Object to return new MicroOp with a new BR mask given a MicroOp mask
* and old BR mask.
*/
object GetNewUopAndBrMask
{
def apply(uop: MicroOp, brupdate: BrUpdateInfo)
(implicit p: Parameters): MicroOp = {
val newuop = WireInit(uop)
newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask
newuop
}
}
/**
* Object to return a BR mask given a MicroOp mask and old BR mask.
*/
object GetNewBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = {
return uop.br_mask & ~brupdate.b1.resolve_mask
}
def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = {
return br_mask & ~brupdate.b1.resolve_mask
}
}
object UpdateBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = {
val out = WireInit(uop)
out.br_mask := GetNewBrMask(brupdate, uop)
out
}
def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = {
val out = WireInit(bundle)
out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask)
out
}
def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: Valid[T]): Valid[T] = {
val out = WireInit(bundle)
out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask)
out.valid := bundle.valid && !IsKilledByBranch(brupdate, bundle.bits.uop.br_mask)
out
}
}
/**
* Object to check if at least 1 bit matches in two masks
*/
object maskMatch
{
def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U
}
/**
* Object to clear one bit in a mask given an index
*/
object clearMaskBit
{
def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0)
}
/**
* Object to shift a register over by one bit and concat a new one
*/
object PerformShiftRegister
{
def apply(reg_val: UInt, new_bit: Bool): UInt = {
reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt
reg_val
}
}
/**
* Object to shift a register over by one bit, wrapping the top bit around to the bottom
* (XOR'ed with a new-bit), and evicting a bit at index HLEN.
* This is used to simulate a longer HLEN-width shift register that is folded
* down to a compressed CLEN.
*/
object PerformCircularShiftRegister
{
def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = {
val carry = csr(clen-1)
val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U)
newval
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapAdd
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, amt: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + amt)(log2Ceil(n)-1,0)
} else {
val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt)
Mux(sum >= n.U,
sum - n.U,
sum)
}
}
}
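// Illustrative example: for a non-power-of-2 count, WrapAdd(4.U, 3.U, 6) yields
// 1.U (4 + 3 = 7 wraps past 6); for a power-of-2 count it is a truncated add,
// e.g. WrapAdd(6.U, 3.U, 8) yields 1.U. The non-power-of-2 form wraps at most
// once, so it assumes amt is no larger than n.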
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapSub
{
// "n" is the number of increments, so we wrap to n-1.
def apply(value: UInt, amt: Int, n: Int): UInt = {
if (isPow2(n)) {
(value - amt.U)(log2Ceil(n)-1,0)
} else {
val v = Cat(0.U(1.W), value)
val b = Cat(0.U(1.W), amt.U)
Mux(value >= amt.U,
value - amt.U,
n.U - amt.U + value)
}
}
}
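// Illustrative example: WrapSub(1.U, 3, 6) yields 4.U, since stepping back
// three entries from index 1 wraps through 0 and 5 to 4.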
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapInc
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === (n-1).U)
Mux(wrap, 0.U, value + 1.U)
}
}
}
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapDec
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value - 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === 0.U)
Mux(wrap, (n-1).U, value - 1.U)
}
}
}
/**
* Object to mask off lower bits of a PC to align to a "b"
* Byte boundary.
*/
object AlignPCToBoundary
{
def apply(pc: UInt, b: Int): UInt = {
    // Invert for the scenario where pc is wider than b
    // (which would otherwise clear all bits above size(b)).
~(~pc | (b-1).U)
}
}
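// Illustrative example: AlignPCToBoundary(pc, 64) clears the low six bits, so
// a pc of 0x80001234 becomes 0x80001200, and the high bits of a wide pc are
// preserved.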
/**
* Object to rotate a signal left by one
*/
object RotateL1
{
def apply(signal: UInt): UInt = {
val w = signal.getWidth
val out = Cat(signal(w-2,0), signal(w-1))
return out
}
}
/**
* Object to sext a value to a particular length.
*/
object Sext
{
def apply(x: UInt, length: Int): UInt = {
if (x.getWidth == length) return x
else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x)
}
}
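// Illustrative example: Sext("b1010".U(4.W), 8) replicates the sign bit to give
// "b11111010".U; a value already of the requested width is returned unchanged.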
/**
* Object to translate from BOOM's special "packed immediate" to a 32b signed immediate
* Asking for U-type gives it shifted up 12 bits.
*/
object ImmGen
{
import boom.v3.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U}
def apply(ip: UInt, isel: UInt): SInt = {
val sign = ip(LONGEST_IMM_SZ-1).asSInt
val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign)
val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign)
val i11 = Mux(isel === IS_U, 0.S,
Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign))
val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt)
val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt)
val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S)
return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0).asSInt
}
}
/**
* Object to get the FP rounding mode out of a packed immediate.
*/
object ImmGenRm { def apply(ip: UInt): UInt = { return ip(2,0) } }
/**
 * Object to get the FP function type from a packed immediate.
* Note: only works if !(IS_B or IS_S)
*/
object ImmGenTyp { def apply(ip: UInt): UInt = { return ip(9,8) } }
/**
* Object to see if an instruction is a JALR.
*/
object DebugIsJALR
{
def apply(inst: UInt): Bool = {
// TODO Chisel not sure why this won't compile
// val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)),
// Array(
// JALR -> Bool(true)))
inst(6,0) === "b1100111".U
}
}
/**
* Object to take an instruction and output its branch or jal target. Only used
 * for a debug assert (nowhere else would we jump straight from instruction
* bits to a target).
*/
object DebugGetBJImm
{
def apply(inst: UInt): UInt = {
// TODO Chisel not sure why this won't compile
//val csignals =
//rocket.DecodeLogic(inst,
// List(Bool(false), Bool(false)),
// Array(
// BEQ -> List(Bool(true ), Bool(false)),
// BNE -> List(Bool(true ), Bool(false)),
// BGE -> List(Bool(true ), Bool(false)),
// BGEU -> List(Bool(true ), Bool(false)),
// BLT -> List(Bool(true ), Bool(false)),
// BLTU -> List(Bool(true ), Bool(false))
// ))
//val is_br :: nothing :: Nil = csignals
val is_br = (inst(6,0) === "b1100011".U)
val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W))
val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W))
Mux(is_br, br_targ, jal_targ)
}
}
/**
* Object to return the lowest bit position after the head.
*/
object AgePriorityEncoder
{
def apply(in: Seq[Bool], head: UInt): UInt = {
val n = in.size
val width = log2Ceil(in.size)
val n_padded = 1 << width
val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in
val idx = PriorityEncoder(temp_vec)
idx(width-1, 0) //discard msb
}
}
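// Illustrative example: with in = Seq(false.B, true.B, false.B, true.B) and
// head = 2.U, the encoder returns 3.U (the first valid index at or after the
// head); with head = 0.U it returns 1.U.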
/**
* Object to determine whether queue
* index i0 is older than index i1.
*/
object IsOlder
{
def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head))
}
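// Illustrative example: with head = 5.U in an 8-entry queue,
// IsOlder(6.U, 2.U, 5.U) is true.B (index 6 was allocated just after the head,
// index 2 only after wrapping), while IsOlder(2.U, 6.U, 5.U) is false.B.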
/**
* Set all bits at or below the highest order '1'.
*/
object MaskLower
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => in >> i.U).reduce(_|_)
}
}
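// Illustrative example: MaskLower("b00101000".U) yields "b00111111".U.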
/**
* Set all bits at or above the lowest order '1'.
*/
object MaskUpper
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_)
}
}
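// Illustrative example: MaskUpper("b00101000".U) yields "b11111000".U.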
/**
* Transpose a matrix of Chisel Vecs.
*/
object Transpose
{
def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = {
val n = in(0).size
VecInit((0 until n).map(i => VecInit(in.map(row => row(i)))))
}
}
/**
* N-wide one-hot priority encoder.
*/
object SelectFirstN
{
def apply(in: UInt, n: Int) = {
val sels = Wire(Vec(n, UInt(in.getWidth.W)))
var mask = in
for (i <- 0 until n) {
sels(i) := PriorityEncoderOH(mask)
mask = mask & ~sels(i)
}
sels
}
}
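// Illustrative example: SelectFirstN("b01101".U, 2) produces the one-hot
// selections "b00001".U and "b00100".U for the two lowest set bits.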
/**
* Connect the first k of n valid input interfaces to k output interfaces.
*/
class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module
{
require(n >= k)
val io = IO(new Bundle {
val in = Vec(n, Flipped(DecoupledIO(gen)))
val out = Vec(k, DecoupledIO(gen))
})
if (n == k) {
io.out <> io.in
} else {
val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c))
val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col =>
(col zip io.in.map(_.valid)) map {case (c,v) => c && v})
val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_))
val out_valids = sels map (col => col.reduce(_||_))
val out_data = sels map (s => Mux1H(s, io.in.map(_.bits)))
in_readys zip io.in foreach {case (r,i) => i.ready := r}
out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d}
}
}
/**
* Create a queue that can be killed with a branch kill signal.
* Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq).
*/
class BranchKillableQueue[T <: boom.v3.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v3.common.MicroOp => Bool = u => true.B, flow: Boolean = true)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v3.common.BoomModule()(p)
with boom.v3.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val enq = Flipped(Decoupled(gen))
val deq = Decoupled(gen)
val brupdate = Input(new BrUpdateInfo())
val flush = Input(Bool())
val empty = Output(Bool())
val count = Output(UInt(log2Ceil(entries).W))
})
val ram = Mem(entries, gen)
val valids = RegInit(VecInit(Seq.fill(entries) {false.B}))
val uops = Reg(Vec(entries, new MicroOp))
val enq_ptr = Counter(entries)
val deq_ptr = Counter(entries)
val maybe_full = RegInit(false.B)
val ptr_match = enq_ptr.value === deq_ptr.value
io.empty := ptr_match && !maybe_full
val full = ptr_match && maybe_full
val do_enq = WireInit(io.enq.fire)
val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty)
for (i <- 0 until entries) {
val mask = uops(i).br_mask
val uop = uops(i)
valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, mask) && !(io.flush && flush_fn(uop))
when (valids(i)) {
uops(i).br_mask := GetNewBrMask(io.brupdate, mask)
}
}
when (do_enq) {
ram(enq_ptr.value) := io.enq.bits
valids(enq_ptr.value) := true.B //!IsKilledByBranch(io.brupdate, io.enq.bits.uop)
uops(enq_ptr.value) := io.enq.bits.uop
uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
enq_ptr.inc()
}
when (do_deq) {
valids(deq_ptr.value) := false.B
deq_ptr.inc()
}
when (do_enq =/= do_deq) {
maybe_full := do_enq
}
io.enq.ready := !full
val out = Wire(gen)
out := ram(deq_ptr.value)
out.uop := uops(deq_ptr.value)
io.deq.valid := !io.empty && valids(deq_ptr.value) && !IsKilledByBranch(io.brupdate, out.uop) && !(io.flush && flush_fn(out.uop))
io.deq.bits := out
io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, out.uop)
// For flow queue behavior.
if (flow) {
when (io.empty) {
io.deq.valid := io.enq.valid //&& !IsKilledByBranch(io.brupdate, io.enq.bits.uop)
io.deq.bits := io.enq.bits
io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
do_deq := false.B
when (io.deq.ready) { do_enq := false.B }
}
}
private val ptr_diff = enq_ptr.value - deq_ptr.value
if (isPow2(entries)) {
io.count := Cat(maybe_full && ptr_match, ptr_diff)
}
else {
io.count := Mux(ptr_match,
Mux(maybe_full,
entries.asUInt, 0.U),
Mux(deq_ptr.value > enq_ptr.value,
entries.asUInt + ptr_diff, ptr_diff))
}
}
// ------------------------------------------
// Printf helper functions
// ------------------------------------------
object BoolToChar
{
/**
* Take in a Chisel Bool and convert it into a Str
* based on the Chars given
*
* @param c_bool Chisel Bool
* @param trueChar Scala Char if bool is true
* @param falseChar Scala Char if bool is false
* @return UInt ASCII Char for "trueChar" or "falseChar"
*/
def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = {
Mux(c_bool, Str(trueChar), Str(falseChar))
}
}
object CfiTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param cfi_type specific cfi type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(cfi_type: UInt) = {
val strings = Seq("----", "BR ", "JAL ", "JALR")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(cfi_type)
}
}
object BpdTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param bpd_type specific bpd type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(bpd_type: UInt) = {
val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(bpd_type)
}
}
object RobTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param rob_type specific rob type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(rob_type: UInt) = {
val strings = Seq("RST", "NML", "RBK", " WT")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(rob_type)
}
}
object XRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param xreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(xreg: UInt) = {
val strings = Seq(" x0", " ra", " sp", " gp",
" tp", " t0", " t1", " t2",
" s0", " s1", " a0", " a1",
" a2", " a3", " a4", " a5",
" a6", " a7", " s2", " s3",
" s4", " s5", " s6", " s7",
" s8", " s9", "s10", "s11",
" t3", " t4", " t5", " t6")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(xreg)
}
}
object FPRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param fpreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(fpreg: UInt) = {
val strings = Seq(" ft0", " ft1", " ft2", " ft3",
" ft4", " ft5", " ft6", " ft7",
" fs0", " fs1", " fa0", " fa1",
" fa2", " fa3", " fa4", " fa5",
" fa6", " fa7", " fs2", " fs3",
" fs4", " fs5", " fs6", " fs7",
" fs8", " fs9", "fs10", "fs11",
" ft8", " ft9", "ft10", "ft11")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(fpreg)
}
}
object BoomCoreStringPrefix
{
/**
* Add prefix to BOOM strings (currently only adds the hartId)
*
* @param strs list of strings
* @return String combining the list with the prefix per line
*/
def apply(strs: String*)(implicit p: Parameters) = {
val prefix = "[C" + s"${p(TileKey).tileId}" + "] "
strs.map(str => prefix + str + "\n").mkString("")
}
}
File functional-unit.scala:
//******************************************************************************
// Copyright (c) 2013 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Functional Units
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//
// If regfile bypassing is disabled, then the functional unit must do its own
// bypassing in here on the WB stage (i.e., bypassing the io.resp.data)
//
// TODO: explore possibility of conditional IO fields? if a branch unit... how to add extra to IO in subclass?
package boom.v3.exu
import chisel3._
import chisel3.util._
import chisel3.experimental.dataview._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
import freechips.rocketchip.tile
import freechips.rocketchip.rocket.{PipelinedMultiplier,BP,BreakpointUnit,Causes,CSR}
import boom.v3.common._
import boom.v3.ifu._
import boom.v3.util._
/**
* Functional unit constants
*/
object FUConstants
{
// bit mask, since a given execution pipeline may support multiple functional units
val FUC_SZ = 10
val FU_X = BitPat.dontCare(FUC_SZ)
val FU_ALU = 1.U(FUC_SZ.W)
val FU_JMP = 2.U(FUC_SZ.W)
val FU_MEM = 4.U(FUC_SZ.W)
val FU_MUL = 8.U(FUC_SZ.W)
val FU_DIV = 16.U(FUC_SZ.W)
val FU_CSR = 32.U(FUC_SZ.W)
val FU_FPU = 64.U(FUC_SZ.W)
val FU_FDV = 128.U(FUC_SZ.W)
val FU_I2F = 256.U(FUC_SZ.W)
val FU_F2I = 512.U(FUC_SZ.W)
// FP stores generate data through FP F2I, and generate address through MemAddrCalc
val FU_F2IMEM = 516.U(FUC_SZ.W)
}
import FUConstants._
/**
* Class to tell the FUDecoders what units it needs to support
*
* @param alu support alu unit?
 * @param jmp support jump unit?
* @param mem support mem unit?
 * @param muld support integer multiply/divide unit?
* @param fpu support FP unit?
* @param csr support csr writing unit?
* @param fdiv support FP div unit?
* @param ifpu support int to FP unit?
*/
class SupportedFuncUnits(
val alu: Boolean = false,
val jmp: Boolean = false,
val mem: Boolean = false,
val muld: Boolean = false,
val fpu: Boolean = false,
val csr: Boolean = false,
val fdiv: Boolean = false,
val ifpu: Boolean = false)
{
}
/**
* Bundle for signals sent to the functional unit
*
* @param dataWidth width of the data sent to the functional unit
*/
class FuncUnitReq(val dataWidth: Int)(implicit p: Parameters) extends BoomBundle
with HasBoomUOP
{
val numOperands = 3
val rs1_data = UInt(dataWidth.W)
val rs2_data = UInt(dataWidth.W)
val rs3_data = UInt(dataWidth.W) // only used for FMA units
val pred_data = Bool()
val kill = Bool() // kill everything
}
/**
* Bundle for the signals sent out of the function unit
*
* @param dataWidth data sent from the functional unit
*/
class FuncUnitResp(val dataWidth: Int)(implicit p: Parameters) extends BoomBundle
with HasBoomUOP
{
val predicated = Bool() // Was this response from a predicated-off instruction
val data = UInt(dataWidth.W)
val fflags = new ValidIO(new FFlagsResp)
val addr = UInt((vaddrBits+1).W) // only for maddr -> LSU
val mxcpt = new ValidIO(UInt((freechips.rocketchip.rocket.Causes.all.max+2).W)) //only for maddr->LSU
val sfence = Valid(new freechips.rocketchip.rocket.SFenceReq) // only for mcalc
}
/**
* Branch resolution information given from the branch unit
*/
class BrResolutionInfo(implicit p: Parameters) extends BoomBundle
{
val uop = new MicroOp
val valid = Bool()
val mispredict = Bool()
val taken = Bool() // which direction did the branch go?
val cfi_type = UInt(CFI_SZ.W)
// Info for recalculating the pc for this branch
val pc_sel = UInt(2.W)
val jalr_target = UInt(vaddrBitsExtended.W)
val target_offset = SInt()
}
class BrUpdateInfo(implicit p: Parameters) extends BoomBundle
{
// On the first cycle we get masks to kill registers
val b1 = new BrUpdateMasks
// On the second cycle we get indices to reset pointers
val b2 = new BrResolutionInfo
}
class BrUpdateMasks(implicit p: Parameters) extends BoomBundle
{
val resolve_mask = UInt(maxBrCount.W)
val mispredict_mask = UInt(maxBrCount.W)
}
/**
 * Abstract top level functional unit class that wraps a lower level hand made functional unit
 *
 * @param isPipelined is the functional unit pipelined?
 * @param numStages how many pipeline stages does the functional unit have
 * @param numBypassStages how many bypass stages does the functional unit have
 * @param dataWidth width of the data being operated on in the functional unit
 * @param isJmpUnit is this the jump unit (reads the FTQ to resolve JALR targets)?
 * @param isAluUnit is this an ALU unit (drives the io.brinfo branch-resolution output)?
 * @param isMemAddrCalcUnit is this the memory address calculation unit (needs status/breakpoint inputs)?
 * @param needsFcsr does this unit need the dynamic rounding mode from the FCSR?
 */
abstract class FunctionalUnit(
val isPipelined: Boolean,
val numStages: Int,
val numBypassStages: Int,
val dataWidth: Int,
val isJmpUnit: Boolean = false,
val isAluUnit: Boolean = false,
val isMemAddrCalcUnit: Boolean = false,
val needsFcsr: Boolean = false)
(implicit p: Parameters) extends BoomModule
{
val io = IO(new Bundle {
val req = Flipped(new DecoupledIO(new FuncUnitReq(dataWidth)))
val resp = (new DecoupledIO(new FuncUnitResp(dataWidth)))
val brupdate = Input(new BrUpdateInfo())
val bypass = Output(Vec(numBypassStages, Valid(new ExeUnitResp(dataWidth))))
// only used by the fpu unit
val fcsr_rm = if (needsFcsr) Input(UInt(tile.FPConstants.RM_SZ.W)) else null
// only used by branch unit
val brinfo = if (isAluUnit) Output(new BrResolutionInfo()) else null
val get_ftq_pc = if (isJmpUnit) Flipped(new GetPCFromFtqIO()) else null
val status = if (isMemAddrCalcUnit) Input(new freechips.rocketchip.rocket.MStatus()) else null
// only used by memaddr calc unit
val bp = if (isMemAddrCalcUnit) Input(Vec(nBreakpoints, new BP)) else null
val mcontext = if (isMemAddrCalcUnit) Input(UInt(coreParams.mcontextWidth.W)) else null
val scontext = if (isMemAddrCalcUnit) Input(UInt(coreParams.scontextWidth.W)) else null
})
io.bypass.foreach { b => b.valid := false.B; b.bits := DontCare }
io.resp.valid := false.B
io.resp.bits := DontCare
if (isJmpUnit) {
io.get_ftq_pc.ftq_idx := DontCare
}
}
/**
 * Abstract top level pipelined functional unit
 *
 * Note: this helps track which uops get killed while in intermediate stages,
 * but it is the job of the consumer to check for kills on the same cycle as consumption!!!
 *
 * @param numStages how many pipeline stages does the functional unit have
 * @param numBypassStages how many bypass stages does the functional unit have
 * @param earliestBypassStage first stage that you can start bypassing from
 * @param dataWidth width of the data being operated on in the functional unit
 * @param isJmpUnit is this the jump unit (reads the FTQ to resolve JALR targets)?
 * @param isAluUnit is this an ALU unit (drives the io.brinfo branch-resolution output)?
 * @param isMemAddrCalcUnit is this the memory address calculation unit?
 * @param needsFcsr does this unit need the dynamic rounding mode from the FCSR?
 */
*/
abstract class PipelinedFunctionalUnit(
numStages: Int,
numBypassStages: Int,
earliestBypassStage: Int,
dataWidth: Int,
isJmpUnit: Boolean = false,
isAluUnit: Boolean = false,
isMemAddrCalcUnit: Boolean = false,
needsFcsr: Boolean = false
)(implicit p: Parameters) extends FunctionalUnit(
isPipelined = true,
numStages = numStages,
numBypassStages = numBypassStages,
dataWidth = dataWidth,
isJmpUnit = isJmpUnit,
isAluUnit = isAluUnit,
isMemAddrCalcUnit = isMemAddrCalcUnit,
needsFcsr = needsFcsr)
{
// Pipelined functional unit is always ready.
io.req.ready := true.B
if (numStages > 0) {
val r_valids = RegInit(VecInit(Seq.fill(numStages) { false.B }))
val r_uops = Reg(Vec(numStages, new MicroOp()))
// handle incoming request
r_valids(0) := io.req.valid && !IsKilledByBranch(io.brupdate, io.req.bits.uop) && !io.req.bits.kill
r_uops(0) := io.req.bits.uop
r_uops(0).br_mask := GetNewBrMask(io.brupdate, io.req.bits.uop)
// handle middle of the pipeline
for (i <- 1 until numStages) {
r_valids(i) := r_valids(i-1) && !IsKilledByBranch(io.brupdate, r_uops(i-1)) && !io.req.bits.kill
r_uops(i) := r_uops(i-1)
r_uops(i).br_mask := GetNewBrMask(io.brupdate, r_uops(i-1))
if (numBypassStages > 0) {
io.bypass(i-1).bits.uop := r_uops(i-1)
}
}
// handle outgoing (branch could still kill it)
// consumer must also check for pipeline flushes (kills)
io.resp.valid := r_valids(numStages-1) && !IsKilledByBranch(io.brupdate, r_uops(numStages-1))
io.resp.bits.predicated := false.B
io.resp.bits.uop := r_uops(numStages-1)
io.resp.bits.uop.br_mask := GetNewBrMask(io.brupdate, r_uops(numStages-1))
// bypassing (TODO allow bypass vector to have a different size from numStages)
if (numBypassStages > 0 && earliestBypassStage == 0) {
io.bypass(0).bits.uop := io.req.bits.uop
for (i <- 1 until numBypassStages) {
io.bypass(i).bits.uop := r_uops(i-1)
}
}
} else {
require (numStages == 0)
// pass req straight through to response
// valid doesn't check kill signals, let consumer deal with it.
// The LSU already handles it and this hurts critical path.
io.resp.valid := io.req.valid && !IsKilledByBranch(io.brupdate, io.req.bits.uop)
io.resp.bits.predicated := false.B
io.resp.bits.uop := io.req.bits.uop
io.resp.bits.uop.br_mask := GetNewBrMask(io.brupdate, io.req.bits.uop)
}
}
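// Concrete pipelined units below only drive their datapath outputs (e.g. PipelinedMulUnit
// just connects io.resp.bits.data); the valid/uop/br_mask tracking above is inherited.
// A minimal subclass sketch (illustrative only, not part of BOOM):
//
//   class PassthroughUnit(dataWidth: Int)(implicit p: Parameters)
//     extends PipelinedFunctionalUnit(numStages = 0, numBypassStages = 0,
//                                     earliestBypassStage = 0, dataWidth = dataWidth) {
//     io.resp.bits.data := io.req.bits.rs1_data
//   }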
/**
 * Functional unit that wraps Rocket Chip's ALU
 *
 * @param isJmpUnit is this also the jump unit (resolves JALR targets via the FTQ)?
 * @param numStages how many pipeline stages does the functional unit have
 * @param dataWidth width of the data being operated on in the functional unit
 */
class ALUUnit(isJmpUnit: Boolean = false, numStages: Int = 1, dataWidth: Int)(implicit p: Parameters)
extends PipelinedFunctionalUnit(
numStages = numStages,
numBypassStages = numStages,
isAluUnit = true,
earliestBypassStage = 0,
dataWidth = dataWidth,
isJmpUnit = isJmpUnit)
with boom.v3.ifu.HasBoomFrontendParameters
{
val uop = io.req.bits.uop
// immediate generation
val imm_xprlen = ImmGen(uop.imm_packed, uop.ctrl.imm_sel)
// operand 1 select
var op1_data: UInt = null
if (isJmpUnit) {
// Get the uop PC for jumps
val block_pc = AlignPCToBoundary(io.get_ftq_pc.pc, icBlockBytes)
val uop_pc = (block_pc | uop.pc_lob) - Mux(uop.edge_inst, 2.U, 0.U)
op1_data = Mux(uop.ctrl.op1_sel.asUInt === OP1_RS1 , io.req.bits.rs1_data,
Mux(uop.ctrl.op1_sel.asUInt === OP1_PC , Sext(uop_pc, xLen),
0.U))
} else {
op1_data = Mux(uop.ctrl.op1_sel.asUInt === OP1_RS1 , io.req.bits.rs1_data,
0.U)
}
// operand 2 select
val op2_data = Mux(uop.ctrl.op2_sel === OP2_IMM, Sext(imm_xprlen.asUInt, xLen),
Mux(uop.ctrl.op2_sel === OP2_IMMC, io.req.bits.uop.prs1(4,0),
Mux(uop.ctrl.op2_sel === OP2_RS2 , io.req.bits.rs2_data,
Mux(uop.ctrl.op2_sel === OP2_NEXT, Mux(uop.is_rvc, 2.U, 4.U),
0.U))))
val alu = Module(new freechips.rocketchip.rocket.ALU())
alu.io.in1 := op1_data.asUInt
alu.io.in2 := op2_data.asUInt
alu.io.fn := uop.ctrl.op_fcn
alu.io.dw := uop.ctrl.fcn_dw
// Did I just get killed by the previous cycle's branch,
// or by a flush pipeline?
val killed = WireInit(false.B)
when (io.req.bits.kill || IsKilledByBranch(io.brupdate, uop)) {
killed := true.B
}
val rs1 = io.req.bits.rs1_data
val rs2 = io.req.bits.rs2_data
val br_eq = (rs1 === rs2)
val br_ltu = (rs1.asUInt < rs2.asUInt)
val br_lt = (~(rs1(xLen-1) ^ rs2(xLen-1)) & br_ltu |
rs1(xLen-1) & ~rs2(xLen-1)).asBool
val pc_sel = MuxLookup(uop.ctrl.br_type, PC_PLUS4)(
Seq( BR_N -> PC_PLUS4,
BR_NE -> Mux(!br_eq, PC_BRJMP, PC_PLUS4),
BR_EQ -> Mux( br_eq, PC_BRJMP, PC_PLUS4),
BR_GE -> Mux(!br_lt, PC_BRJMP, PC_PLUS4),
BR_GEU -> Mux(!br_ltu, PC_BRJMP, PC_PLUS4),
BR_LT -> Mux( br_lt, PC_BRJMP, PC_PLUS4),
BR_LTU -> Mux( br_ltu, PC_BRJMP, PC_PLUS4),
BR_J -> PC_BRJMP,
BR_JR -> PC_JALR
))
val is_taken = io.req.valid &&
!killed &&
(uop.is_br || uop.is_jalr || uop.is_jal) &&
(pc_sel =/= PC_PLUS4)
// "mispredict" means that a branch has been resolved and it must be killed
val mispredict = WireInit(false.B)
val is_br = io.req.valid && !killed && uop.is_br && !uop.is_sfb
val is_jal = io.req.valid && !killed && uop.is_jal
val is_jalr = io.req.valid && !killed && uop.is_jalr
when (is_br || is_jalr) {
if (!isJmpUnit) {
assert (pc_sel =/= PC_JALR)
}
when (pc_sel === PC_PLUS4) {
mispredict := uop.taken
}
when (pc_sel === PC_BRJMP) {
mispredict := !uop.taken
}
}
val brinfo = Wire(new BrResolutionInfo)
// note: jal doesn't allocate a branch-mask, so don't clear a br-mask bit
brinfo.valid := is_br || is_jalr
brinfo.mispredict := mispredict
brinfo.uop := uop
brinfo.cfi_type := Mux(is_jalr, CFI_JALR,
Mux(is_br , CFI_BR, CFI_X))
brinfo.taken := is_taken
brinfo.pc_sel := pc_sel
brinfo.jalr_target := DontCare
// Branch/Jump Target Calculation
// For jumps we read the FTQ, and can calculate the target
// For branches we emit the offset for the core to redirect if necessary
val target_offset = imm_xprlen(20,0).asSInt
if (isJmpUnit) {
def encodeVirtualAddress(a0: UInt, ea: UInt) = if (vaddrBitsExtended == vaddrBits) {
ea
} else {
// Efficient means to compress 64-bit VA into vaddrBits+1 bits.
// (VA is bad if VA(vaddrBits) != VA(vaddrBits-1)).
val a = a0.asSInt >> vaddrBits
val msb = Mux(a === 0.S || a === -1.S, ea(vaddrBits), !ea(vaddrBits-1))
Cat(msb, ea(vaddrBits-1,0))
}
val jalr_target_base = io.req.bits.rs1_data.asSInt
val jalr_target_xlen = Wire(UInt(xLen.W))
jalr_target_xlen := (jalr_target_base + target_offset).asUInt
val jalr_target = (encodeVirtualAddress(jalr_target_xlen, jalr_target_xlen).asSInt & -2.S).asUInt
brinfo.jalr_target := jalr_target
val cfi_idx = ((uop.pc_lob ^ Mux(io.get_ftq_pc.entry.start_bank === 1.U, 1.U << log2Ceil(bankBytes), 0.U)))(log2Ceil(fetchWidth),1)
when (pc_sel === PC_JALR) {
mispredict := !io.get_ftq_pc.next_val ||
(io.get_ftq_pc.next_pc =/= jalr_target) ||
!io.get_ftq_pc.entry.cfi_idx.valid ||
(io.get_ftq_pc.entry.cfi_idx.bits =/= cfi_idx)
}
}
brinfo.target_offset := target_offset
io.brinfo := brinfo
// Response
// TODO add clock gate on resp bits from functional units
// io.resp.bits.data := RegEnable(alu.io.out, io.req.valid)
// val reg_data = Reg(outType = Bits(width = xLen))
// reg_data := alu.io.out
// io.resp.bits.data := reg_data
val r_val = RegInit(VecInit(Seq.fill(numStages) { false.B }))
val r_data = Reg(Vec(numStages, UInt(xLen.W)))
val r_pred = Reg(Vec(numStages, Bool()))
val alu_out = Mux(io.req.bits.uop.is_sfb_shadow && io.req.bits.pred_data,
Mux(io.req.bits.uop.ldst_is_rs1, io.req.bits.rs1_data, io.req.bits.rs2_data),
Mux(io.req.bits.uop.uopc === uopMOV, io.req.bits.rs2_data, alu.io.out))
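  // When an SFB-shadow uop has been predicated off (pred_data set), the ALU result is
  // replaced with a copy of the unmodified source operand (rs1 or rs2, selected by
  // ldst_is_rs1) so the destination keeps its old value; uopMOV similarly just forwards rs2.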
r_val (0) := io.req.valid
r_data(0) := Mux(io.req.bits.uop.is_sfb_br, pc_sel === PC_BRJMP, alu_out)
r_pred(0) := io.req.bits.uop.is_sfb_shadow && io.req.bits.pred_data
for (i <- 1 until numStages) {
r_val(i) := r_val(i-1)
r_data(i) := r_data(i-1)
r_pred(i) := r_pred(i-1)
}
io.resp.bits.data := r_data(numStages-1)
io.resp.bits.predicated := r_pred(numStages-1)
// Bypass
// for the ALU, we can bypass same cycle as compute
require (numStages >= 1)
require (numBypassStages >= 1)
io.bypass(0).valid := io.req.valid
io.bypass(0).bits.data := Mux(io.req.bits.uop.is_sfb_br, pc_sel === PC_BRJMP, alu_out)
for (i <- 1 until numStages) {
io.bypass(i).valid := r_val(i-1)
io.bypass(i).bits.data := r_data(i-1)
}
// Exceptions
io.resp.bits.fflags.valid := false.B
}
/**
 * Functional unit that passes in base+imm to calculate addresses, and passes store data
 * to the LSU.
 * For floating point, the 65-bit FP store-data needs to be decoded into 64-bit FP form
 */
class MemAddrCalcUnit(implicit p: Parameters)
extends PipelinedFunctionalUnit(
numStages = 0,
numBypassStages = 0,
earliestBypassStage = 0,
dataWidth = 65, // TODO enable this only if FP is enabled?
isMemAddrCalcUnit = true)
with freechips.rocketchip.rocket.constants.MemoryOpConstants
with freechips.rocketchip.rocket.constants.ScalarOpConstants
{
// perform address calculation
val sum = (io.req.bits.rs1_data.asSInt + io.req.bits.uop.imm_packed(19,8).asSInt).asUInt
val ea_sign = Mux(sum(vaddrBits-1), ~sum(63,vaddrBits) === 0.U,
sum(63,vaddrBits) =/= 0.U)
val effective_address = Cat(ea_sign, sum(vaddrBits-1,0)).asUInt
val store_data = io.req.bits.rs2_data
io.resp.bits.addr := effective_address
io.resp.bits.data := store_data
if (dataWidth > 63) {
assert (!(io.req.valid && io.req.bits.uop.ctrl.is_std &&
io.resp.bits.data(64).asBool === true.B), "65th bit set in MemAddrCalcUnit.")
assert (!(io.req.valid && io.req.bits.uop.ctrl.is_std && io.req.bits.uop.fp_val),
"FP store-data should now be going through a different unit.")
}
assert (!(io.req.bits.uop.fp_val && io.req.valid && io.req.bits.uop.uopc =/=
uopLD && io.req.bits.uop.uopc =/= uopSTA),
"[maddrcalc] assert we never get store data in here.")
// Handle misaligned exceptions
val size = io.req.bits.uop.mem_size
val misaligned =
(size === 1.U && (effective_address(0) =/= 0.U)) ||
(size === 2.U && (effective_address(1,0) =/= 0.U)) ||
(size === 3.U && (effective_address(2,0) =/= 0.U))
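  // The check above treats mem_size as log2(bytes): 1 = halfword, 2 = word, 3 = doubleword,
  // so an access is misaligned whenever its low log2(bytes) address bits are nonzero. For
  // example, a word access (size === 2.U) to an address ending in 0x2 has
  // effective_address(1,0) === 2.U, which raises the corresponding misaligned cause below.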
val bkptu = Module(new BreakpointUnit(nBreakpoints))
bkptu.io.status := io.status
bkptu.io.bp := io.bp
bkptu.io.pc := DontCare
bkptu.io.ea := effective_address
bkptu.io.mcontext := io.mcontext
bkptu.io.scontext := io.scontext
val ma_ld = io.req.valid && io.req.bits.uop.uopc === uopLD && misaligned
val ma_st = io.req.valid && (io.req.bits.uop.uopc === uopSTA || io.req.bits.uop.uopc === uopAMO_AG) && misaligned
val dbg_bp = io.req.valid && ((io.req.bits.uop.uopc === uopLD && bkptu.io.debug_ld) ||
(io.req.bits.uop.uopc === uopSTA && bkptu.io.debug_st))
val bp = io.req.valid && ((io.req.bits.uop.uopc === uopLD && bkptu.io.xcpt_ld) ||
(io.req.bits.uop.uopc === uopSTA && bkptu.io.xcpt_st))
def checkExceptions(x: Seq[(Bool, UInt)]) =
(x.map(_._1).reduce(_||_), PriorityMux(x))
val (xcpt_val, xcpt_cause) = checkExceptions(List(
(ma_ld, (Causes.misaligned_load).U),
(ma_st, (Causes.misaligned_store).U),
(dbg_bp, (CSR.debugTriggerCause).U),
(bp, (Causes.breakpoint).U)))
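  // PriorityMux picks the first asserted entry, so the cause priority here is
  // misaligned-load > misaligned-store > debug-trigger breakpoint > regular breakpoint.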
io.resp.bits.mxcpt.valid := xcpt_val
io.resp.bits.mxcpt.bits := xcpt_cause
assert (!(ma_ld && ma_st), "Mutually-exclusive exceptions are firing.")
io.resp.bits.sfence.valid := io.req.valid && io.req.bits.uop.mem_cmd === M_SFENCE
io.resp.bits.sfence.bits.rs1 := io.req.bits.uop.mem_size(0)
io.resp.bits.sfence.bits.rs2 := io.req.bits.uop.mem_size(1)
io.resp.bits.sfence.bits.addr := io.req.bits.rs1_data
io.resp.bits.sfence.bits.asid := io.req.bits.rs2_data
}
/**
* Functional unit to wrap lower level FPU
*
* Currently, bypassing is unsupported!
* All FP instructions are padded out to the max latency unit for easy
* write-port scheduling.
*/
class FPUUnit(implicit p: Parameters)
extends PipelinedFunctionalUnit(
numStages = p(tile.TileKey).core.fpu.get.dfmaLatency,
numBypassStages = 0,
earliestBypassStage = 0,
dataWidth = 65,
needsFcsr = true)
{
val fpu = Module(new FPU())
fpu.io.req.valid := io.req.valid
fpu.io.req.bits.uop := io.req.bits.uop
fpu.io.req.bits.rs1_data := io.req.bits.rs1_data
fpu.io.req.bits.rs2_data := io.req.bits.rs2_data
fpu.io.req.bits.rs3_data := io.req.bits.rs3_data
fpu.io.req.bits.fcsr_rm := io.fcsr_rm
io.resp.bits.data := fpu.io.resp.bits.data
io.resp.bits.fflags.valid := fpu.io.resp.bits.fflags.valid
io.resp.bits.fflags.bits.uop := io.resp.bits.uop
io.resp.bits.fflags.bits.flags := fpu.io.resp.bits.fflags.bits.flags // kill me now
}
/**
* Int to FP conversion functional unit
*
* @param latency the amount of stages to delay by
*/
class IntToFPUnit(latency: Int)(implicit p: Parameters)
extends PipelinedFunctionalUnit(
numStages = latency,
numBypassStages = 0,
earliestBypassStage = 0,
dataWidth = 65,
needsFcsr = true)
with tile.HasFPUParameters
{
val fp_decoder = Module(new UOPCodeFPUDecoder) // TODO use a simpler decoder
val io_req = io.req.bits
fp_decoder.io.uopc := io_req.uop.uopc
val fp_ctrl = fp_decoder.io.sigs
val fp_rm = Mux(ImmGenRm(io_req.uop.imm_packed) === 7.U, io.fcsr_rm, ImmGenRm(io_req.uop.imm_packed))
val req = Wire(new tile.FPInput)
val tag = fp_ctrl.typeTagIn
req.viewAsSupertype(new tile.FPUCtrlSigs) := fp_ctrl
req.rm := fp_rm
req.in1 := unbox(io_req.rs1_data, tag, None)
req.in2 := unbox(io_req.rs2_data, tag, None)
req.in3 := DontCare
req.typ := ImmGenTyp(io_req.uop.imm_packed)
req.fmt := DontCare // FIXME: this may not be the right thing to do here
req.fmaCmd := DontCare
assert (!(io.req.valid && fp_ctrl.fromint && req.in1(xLen).asBool),
"[func] IntToFP integer input has 65th high-order bit set!")
assert (!(io.req.valid && !fp_ctrl.fromint),
"[func] Only support fromInt micro-ops.")
val ifpu = Module(new tile.IntToFP(intToFpLatency))
ifpu.io.in.valid := io.req.valid
ifpu.io.in.bits := req
ifpu.io.in.bits.in1 := io_req.rs1_data
val out_double = Pipe(io.req.valid, fp_ctrl.typeTagOut === D, intToFpLatency).bits
//io.resp.bits.data := box(ifpu.io.out.bits.data, !io.resp.bits.uop.fp_single)
io.resp.bits.data := box(ifpu.io.out.bits.data, out_double)
io.resp.bits.fflags.valid := ifpu.io.out.valid
io.resp.bits.fflags.bits.uop := io.resp.bits.uop
io.resp.bits.fflags.bits.flags := ifpu.io.out.bits.exc
}
/**
 * Iterative/unpipelined functional unit. Can only hold a single MicroOp at a time and
 * assumes at least one register between request and response.
 *
 * TODO allow up to N micro-ops simultaneously.
 *
 * @param dataWidth width of the data to be passed into the functional unit
 */
abstract class IterativeFunctionalUnit(dataWidth: Int)(implicit p: Parameters)
extends FunctionalUnit(
isPipelined = false,
numStages = 1,
numBypassStages = 0,
dataWidth = dataWidth)
{
val r_uop = Reg(new MicroOp())
val do_kill = Wire(Bool())
do_kill := io.req.bits.kill // irrelevant default
when (io.req.fire) {
// update incoming uop
do_kill := IsKilledByBranch(io.brupdate, io.req.bits.uop) || io.req.bits.kill
r_uop := io.req.bits.uop
r_uop.br_mask := GetNewBrMask(io.brupdate, io.req.bits.uop)
} .otherwise {
do_kill := IsKilledByBranch(io.brupdate, r_uop) || io.req.bits.kill
r_uop.br_mask := GetNewBrMask(io.brupdate, r_uop)
}
// assumes at least one pipeline register between request and response
io.resp.bits.uop := r_uop
}
/**
* Divide functional unit.
*
* @param dataWidth data to be passed into the functional unit
*/
class DivUnit(dataWidth: Int)(implicit p: Parameters)
extends IterativeFunctionalUnit(dataWidth)
{
// We don't use the iterative multiply functionality here.
// Instead we use the PipelinedMultiplier
val div = Module(new freechips.rocketchip.rocket.MulDiv(mulDivParams, width = dataWidth))
// request
div.io.req.valid := io.req.valid && !this.do_kill
div.io.req.bits.dw := io.req.bits.uop.ctrl.fcn_dw
div.io.req.bits.fn := io.req.bits.uop.ctrl.op_fcn
div.io.req.bits.in1 := io.req.bits.rs1_data
div.io.req.bits.in2 := io.req.bits.rs2_data
div.io.req.bits.tag := DontCare
io.req.ready := div.io.req.ready
// handle pipeline kills and branch misspeculations
div.io.kill := this.do_kill
// response
io.resp.valid := div.io.resp.valid && !this.do_kill
div.io.resp.ready := io.resp.ready
io.resp.bits.data := div.io.resp.bits.data
}
/**
* Pipelined multiplier functional unit that wraps around the RocketChip pipelined multiplier
*
* @param numStages number of pipeline stages
* @param dataWidth size of the data being passed into the functional unit
*/
class PipelinedMulUnit(numStages: Int, dataWidth: Int)(implicit p: Parameters)
extends PipelinedFunctionalUnit(
numStages = numStages,
numBypassStages = 0,
earliestBypassStage = 0,
dataWidth = dataWidth)
{
val imul = Module(new PipelinedMultiplier(xLen, numStages))
// request
imul.io.req.valid := io.req.valid
imul.io.req.bits.fn := io.req.bits.uop.ctrl.op_fcn
imul.io.req.bits.dw := io.req.bits.uop.ctrl.fcn_dw
imul.io.req.bits.in1 := io.req.bits.rs1_data
imul.io.req.bits.in2 := io.req.bits.rs2_data
imul.io.req.bits.tag := DontCare
// response
io.resp.bits.data := imul.io.resp.bits.data
}
| module MemAddrCalcUnit_1( // @[functional-unit.scala:482:7]
input clock, // @[functional-unit.scala:482:7]
input reset, // @[functional-unit.scala:482:7]
input io_req_valid, // @[functional-unit.scala:168:14]
input [6:0] io_req_bits_uop_uopc, // @[functional-unit.scala:168:14]
input [31:0] io_req_bits_uop_inst, // @[functional-unit.scala:168:14]
input [31:0] io_req_bits_uop_debug_inst, // @[functional-unit.scala:168:14]
input io_req_bits_uop_is_rvc, // @[functional-unit.scala:168:14]
input [39:0] io_req_bits_uop_debug_pc, // @[functional-unit.scala:168:14]
input [2:0] io_req_bits_uop_iq_type, // @[functional-unit.scala:168:14]
input [9:0] io_req_bits_uop_fu_code, // @[functional-unit.scala:168:14]
input [3:0] io_req_bits_uop_ctrl_br_type, // @[functional-unit.scala:168:14]
input [1:0] io_req_bits_uop_ctrl_op1_sel, // @[functional-unit.scala:168:14]
input [2:0] io_req_bits_uop_ctrl_op2_sel, // @[functional-unit.scala:168:14]
input [2:0] io_req_bits_uop_ctrl_imm_sel, // @[functional-unit.scala:168:14]
input [4:0] io_req_bits_uop_ctrl_op_fcn, // @[functional-unit.scala:168:14]
input io_req_bits_uop_ctrl_fcn_dw, // @[functional-unit.scala:168:14]
input [2:0] io_req_bits_uop_ctrl_csr_cmd, // @[functional-unit.scala:168:14]
input io_req_bits_uop_ctrl_is_load, // @[functional-unit.scala:168:14]
input io_req_bits_uop_ctrl_is_sta, // @[functional-unit.scala:168:14]
input io_req_bits_uop_ctrl_is_std, // @[functional-unit.scala:168:14]
input [1:0] io_req_bits_uop_iw_state, // @[functional-unit.scala:168:14]
input io_req_bits_uop_iw_p1_poisoned, // @[functional-unit.scala:168:14]
input io_req_bits_uop_iw_p2_poisoned, // @[functional-unit.scala:168:14]
input io_req_bits_uop_is_br, // @[functional-unit.scala:168:14]
input io_req_bits_uop_is_jalr, // @[functional-unit.scala:168:14]
input io_req_bits_uop_is_jal, // @[functional-unit.scala:168:14]
input io_req_bits_uop_is_sfb, // @[functional-unit.scala:168:14]
input [7:0] io_req_bits_uop_br_mask, // @[functional-unit.scala:168:14]
input [2:0] io_req_bits_uop_br_tag, // @[functional-unit.scala:168:14]
input [3:0] io_req_bits_uop_ftq_idx, // @[functional-unit.scala:168:14]
input io_req_bits_uop_edge_inst, // @[functional-unit.scala:168:14]
input [5:0] io_req_bits_uop_pc_lob, // @[functional-unit.scala:168:14]
input io_req_bits_uop_taken, // @[functional-unit.scala:168:14]
input [19:0] io_req_bits_uop_imm_packed, // @[functional-unit.scala:168:14]
input [11:0] io_req_bits_uop_csr_addr, // @[functional-unit.scala:168:14]
input [4:0] io_req_bits_uop_rob_idx, // @[functional-unit.scala:168:14]
input [2:0] io_req_bits_uop_ldq_idx, // @[functional-unit.scala:168:14]
input [2:0] io_req_bits_uop_stq_idx, // @[functional-unit.scala:168:14]
input [1:0] io_req_bits_uop_rxq_idx, // @[functional-unit.scala:168:14]
input [5:0] io_req_bits_uop_pdst, // @[functional-unit.scala:168:14]
input [5:0] io_req_bits_uop_prs1, // @[functional-unit.scala:168:14]
input [5:0] io_req_bits_uop_prs2, // @[functional-unit.scala:168:14]
input [5:0] io_req_bits_uop_prs3, // @[functional-unit.scala:168:14]
input [3:0] io_req_bits_uop_ppred, // @[functional-unit.scala:168:14]
input io_req_bits_uop_prs1_busy, // @[functional-unit.scala:168:14]
input io_req_bits_uop_prs2_busy, // @[functional-unit.scala:168:14]
input io_req_bits_uop_prs3_busy, // @[functional-unit.scala:168:14]
input io_req_bits_uop_ppred_busy, // @[functional-unit.scala:168:14]
input [5:0] io_req_bits_uop_stale_pdst, // @[functional-unit.scala:168:14]
input io_req_bits_uop_exception, // @[functional-unit.scala:168:14]
input [63:0] io_req_bits_uop_exc_cause, // @[functional-unit.scala:168:14]
input io_req_bits_uop_bypassable, // @[functional-unit.scala:168:14]
input [4:0] io_req_bits_uop_mem_cmd, // @[functional-unit.scala:168:14]
input [1:0] io_req_bits_uop_mem_size, // @[functional-unit.scala:168:14]
input io_req_bits_uop_mem_signed, // @[functional-unit.scala:168:14]
input io_req_bits_uop_is_fence, // @[functional-unit.scala:168:14]
input io_req_bits_uop_is_fencei, // @[functional-unit.scala:168:14]
input io_req_bits_uop_is_amo, // @[functional-unit.scala:168:14]
input io_req_bits_uop_uses_ldq, // @[functional-unit.scala:168:14]
input io_req_bits_uop_uses_stq, // @[functional-unit.scala:168:14]
input io_req_bits_uop_is_sys_pc2epc, // @[functional-unit.scala:168:14]
input io_req_bits_uop_is_unique, // @[functional-unit.scala:168:14]
input io_req_bits_uop_flush_on_commit, // @[functional-unit.scala:168:14]
input io_req_bits_uop_ldst_is_rs1, // @[functional-unit.scala:168:14]
input [5:0] io_req_bits_uop_ldst, // @[functional-unit.scala:168:14]
input [5:0] io_req_bits_uop_lrs1, // @[functional-unit.scala:168:14]
input [5:0] io_req_bits_uop_lrs2, // @[functional-unit.scala:168:14]
input [5:0] io_req_bits_uop_lrs3, // @[functional-unit.scala:168:14]
input io_req_bits_uop_ldst_val, // @[functional-unit.scala:168:14]
input [1:0] io_req_bits_uop_dst_rtype, // @[functional-unit.scala:168:14]
input [1:0] io_req_bits_uop_lrs1_rtype, // @[functional-unit.scala:168:14]
input [1:0] io_req_bits_uop_lrs2_rtype, // @[functional-unit.scala:168:14]
input io_req_bits_uop_frs3_en, // @[functional-unit.scala:168:14]
input io_req_bits_uop_fp_val, // @[functional-unit.scala:168:14]
input io_req_bits_uop_fp_single, // @[functional-unit.scala:168:14]
input io_req_bits_uop_xcpt_pf_if, // @[functional-unit.scala:168:14]
input io_req_bits_uop_xcpt_ae_if, // @[functional-unit.scala:168:14]
input io_req_bits_uop_xcpt_ma_if, // @[functional-unit.scala:168:14]
input io_req_bits_uop_bp_debug_if, // @[functional-unit.scala:168:14]
input io_req_bits_uop_bp_xcpt_if, // @[functional-unit.scala:168:14]
input [1:0] io_req_bits_uop_debug_fsrc, // @[functional-unit.scala:168:14]
input [1:0] io_req_bits_uop_debug_tsrc, // @[functional-unit.scala:168:14]
input [64:0] io_req_bits_rs1_data, // @[functional-unit.scala:168:14]
input [64:0] io_req_bits_rs2_data, // @[functional-unit.scala:168:14]
input io_req_bits_kill, // @[functional-unit.scala:168:14]
output io_resp_valid, // @[functional-unit.scala:168:14]
output [6:0] io_resp_bits_uop_uopc, // @[functional-unit.scala:168:14]
output [31:0] io_resp_bits_uop_inst, // @[functional-unit.scala:168:14]
output [31:0] io_resp_bits_uop_debug_inst, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_is_rvc, // @[functional-unit.scala:168:14]
output [39:0] io_resp_bits_uop_debug_pc, // @[functional-unit.scala:168:14]
output [2:0] io_resp_bits_uop_iq_type, // @[functional-unit.scala:168:14]
output [9:0] io_resp_bits_uop_fu_code, // @[functional-unit.scala:168:14]
output [3:0] io_resp_bits_uop_ctrl_br_type, // @[functional-unit.scala:168:14]
output [1:0] io_resp_bits_uop_ctrl_op1_sel, // @[functional-unit.scala:168:14]
output [2:0] io_resp_bits_uop_ctrl_op2_sel, // @[functional-unit.scala:168:14]
output [2:0] io_resp_bits_uop_ctrl_imm_sel, // @[functional-unit.scala:168:14]
output [4:0] io_resp_bits_uop_ctrl_op_fcn, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_ctrl_fcn_dw, // @[functional-unit.scala:168:14]
output [2:0] io_resp_bits_uop_ctrl_csr_cmd, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_ctrl_is_load, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_ctrl_is_sta, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_ctrl_is_std, // @[functional-unit.scala:168:14]
output [1:0] io_resp_bits_uop_iw_state, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_iw_p1_poisoned, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_iw_p2_poisoned, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_is_br, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_is_jalr, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_is_jal, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_is_sfb, // @[functional-unit.scala:168:14]
output [7:0] io_resp_bits_uop_br_mask, // @[functional-unit.scala:168:14]
output [2:0] io_resp_bits_uop_br_tag, // @[functional-unit.scala:168:14]
output [3:0] io_resp_bits_uop_ftq_idx, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_edge_inst, // @[functional-unit.scala:168:14]
output [5:0] io_resp_bits_uop_pc_lob, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_taken, // @[functional-unit.scala:168:14]
output [19:0] io_resp_bits_uop_imm_packed, // @[functional-unit.scala:168:14]
output [11:0] io_resp_bits_uop_csr_addr, // @[functional-unit.scala:168:14]
output [4:0] io_resp_bits_uop_rob_idx, // @[functional-unit.scala:168:14]
output [2:0] io_resp_bits_uop_ldq_idx, // @[functional-unit.scala:168:14]
output [2:0] io_resp_bits_uop_stq_idx, // @[functional-unit.scala:168:14]
output [1:0] io_resp_bits_uop_rxq_idx, // @[functional-unit.scala:168:14]
output [5:0] io_resp_bits_uop_pdst, // @[functional-unit.scala:168:14]
output [5:0] io_resp_bits_uop_prs1, // @[functional-unit.scala:168:14]
output [5:0] io_resp_bits_uop_prs2, // @[functional-unit.scala:168:14]
output [5:0] io_resp_bits_uop_prs3, // @[functional-unit.scala:168:14]
output [3:0] io_resp_bits_uop_ppred, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_prs1_busy, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_prs2_busy, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_prs3_busy, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_ppred_busy, // @[functional-unit.scala:168:14]
output [5:0] io_resp_bits_uop_stale_pdst, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_exception, // @[functional-unit.scala:168:14]
output [63:0] io_resp_bits_uop_exc_cause, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_bypassable, // @[functional-unit.scala:168:14]
output [4:0] io_resp_bits_uop_mem_cmd, // @[functional-unit.scala:168:14]
output [1:0] io_resp_bits_uop_mem_size, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_mem_signed, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_is_fence, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_is_fencei, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_is_amo, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_uses_ldq, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_uses_stq, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_is_sys_pc2epc, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_is_unique, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_flush_on_commit, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_ldst_is_rs1, // @[functional-unit.scala:168:14]
output [5:0] io_resp_bits_uop_ldst, // @[functional-unit.scala:168:14]
output [5:0] io_resp_bits_uop_lrs1, // @[functional-unit.scala:168:14]
output [5:0] io_resp_bits_uop_lrs2, // @[functional-unit.scala:168:14]
output [5:0] io_resp_bits_uop_lrs3, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_ldst_val, // @[functional-unit.scala:168:14]
output [1:0] io_resp_bits_uop_dst_rtype, // @[functional-unit.scala:168:14]
output [1:0] io_resp_bits_uop_lrs1_rtype, // @[functional-unit.scala:168:14]
output [1:0] io_resp_bits_uop_lrs2_rtype, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_frs3_en, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_fp_val, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_fp_single, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_xcpt_pf_if, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_xcpt_ae_if, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_xcpt_ma_if, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_bp_debug_if, // @[functional-unit.scala:168:14]
output io_resp_bits_uop_bp_xcpt_if, // @[functional-unit.scala:168:14]
output [1:0] io_resp_bits_uop_debug_fsrc, // @[functional-unit.scala:168:14]
output [1:0] io_resp_bits_uop_debug_tsrc, // @[functional-unit.scala:168:14]
output [64:0] io_resp_bits_data, // @[functional-unit.scala:168:14]
output [39:0] io_resp_bits_addr, // @[functional-unit.scala:168:14]
output io_resp_bits_mxcpt_valid, // @[functional-unit.scala:168:14]
output [24:0] io_resp_bits_mxcpt_bits, // @[functional-unit.scala:168:14]
output io_resp_bits_sfence_valid, // @[functional-unit.scala:168:14]
output io_resp_bits_sfence_bits_rs1, // @[functional-unit.scala:168:14]
output io_resp_bits_sfence_bits_rs2, // @[functional-unit.scala:168:14]
output [38:0] io_resp_bits_sfence_bits_addr, // @[functional-unit.scala:168:14]
output io_resp_bits_sfence_bits_asid, // @[functional-unit.scala:168:14]
input [7:0] io_brupdate_b1_resolve_mask, // @[functional-unit.scala:168:14]
input [7:0] io_brupdate_b1_mispredict_mask, // @[functional-unit.scala:168:14]
input [6:0] io_brupdate_b2_uop_uopc, // @[functional-unit.scala:168:14]
input [31:0] io_brupdate_b2_uop_inst, // @[functional-unit.scala:168:14]
input [31:0] io_brupdate_b2_uop_debug_inst, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_is_rvc, // @[functional-unit.scala:168:14]
input [39:0] io_brupdate_b2_uop_debug_pc, // @[functional-unit.scala:168:14]
input [2:0] io_brupdate_b2_uop_iq_type, // @[functional-unit.scala:168:14]
input [9:0] io_brupdate_b2_uop_fu_code, // @[functional-unit.scala:168:14]
input [3:0] io_brupdate_b2_uop_ctrl_br_type, // @[functional-unit.scala:168:14]
input [1:0] io_brupdate_b2_uop_ctrl_op1_sel, // @[functional-unit.scala:168:14]
input [2:0] io_brupdate_b2_uop_ctrl_op2_sel, // @[functional-unit.scala:168:14]
input [2:0] io_brupdate_b2_uop_ctrl_imm_sel, // @[functional-unit.scala:168:14]
input [4:0] io_brupdate_b2_uop_ctrl_op_fcn, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_ctrl_fcn_dw, // @[functional-unit.scala:168:14]
input [2:0] io_brupdate_b2_uop_ctrl_csr_cmd, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_ctrl_is_load, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_ctrl_is_sta, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_ctrl_is_std, // @[functional-unit.scala:168:14]
input [1:0] io_brupdate_b2_uop_iw_state, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_iw_p1_poisoned, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_iw_p2_poisoned, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_is_br, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_is_jalr, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_is_jal, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_is_sfb, // @[functional-unit.scala:168:14]
input [7:0] io_brupdate_b2_uop_br_mask, // @[functional-unit.scala:168:14]
input [2:0] io_brupdate_b2_uop_br_tag, // @[functional-unit.scala:168:14]
input [3:0] io_brupdate_b2_uop_ftq_idx, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_edge_inst, // @[functional-unit.scala:168:14]
input [5:0] io_brupdate_b2_uop_pc_lob, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_taken, // @[functional-unit.scala:168:14]
input [19:0] io_brupdate_b2_uop_imm_packed, // @[functional-unit.scala:168:14]
input [11:0] io_brupdate_b2_uop_csr_addr, // @[functional-unit.scala:168:14]
input [4:0] io_brupdate_b2_uop_rob_idx, // @[functional-unit.scala:168:14]
input [2:0] io_brupdate_b2_uop_ldq_idx, // @[functional-unit.scala:168:14]
input [2:0] io_brupdate_b2_uop_stq_idx, // @[functional-unit.scala:168:14]
input [1:0] io_brupdate_b2_uop_rxq_idx, // @[functional-unit.scala:168:14]
input [5:0] io_brupdate_b2_uop_pdst, // @[functional-unit.scala:168:14]
input [5:0] io_brupdate_b2_uop_prs1, // @[functional-unit.scala:168:14]
input [5:0] io_brupdate_b2_uop_prs2, // @[functional-unit.scala:168:14]
input [5:0] io_brupdate_b2_uop_prs3, // @[functional-unit.scala:168:14]
input [3:0] io_brupdate_b2_uop_ppred, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_prs1_busy, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_prs2_busy, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_prs3_busy, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_ppred_busy, // @[functional-unit.scala:168:14]
input [5:0] io_brupdate_b2_uop_stale_pdst, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_exception, // @[functional-unit.scala:168:14]
input [63:0] io_brupdate_b2_uop_exc_cause, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_bypassable, // @[functional-unit.scala:168:14]
input [4:0] io_brupdate_b2_uop_mem_cmd, // @[functional-unit.scala:168:14]
input [1:0] io_brupdate_b2_uop_mem_size, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_mem_signed, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_is_fence, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_is_fencei, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_is_amo, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_uses_ldq, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_uses_stq, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_is_sys_pc2epc, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_is_unique, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_flush_on_commit, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_ldst_is_rs1, // @[functional-unit.scala:168:14]
input [5:0] io_brupdate_b2_uop_ldst, // @[functional-unit.scala:168:14]
input [5:0] io_brupdate_b2_uop_lrs1, // @[functional-unit.scala:168:14]
input [5:0] io_brupdate_b2_uop_lrs2, // @[functional-unit.scala:168:14]
input [5:0] io_brupdate_b2_uop_lrs3, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_ldst_val, // @[functional-unit.scala:168:14]
input [1:0] io_brupdate_b2_uop_dst_rtype, // @[functional-unit.scala:168:14]
input [1:0] io_brupdate_b2_uop_lrs1_rtype, // @[functional-unit.scala:168:14]
input [1:0] io_brupdate_b2_uop_lrs2_rtype, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_frs3_en, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_fp_val, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_fp_single, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_xcpt_pf_if, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_xcpt_ae_if, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_xcpt_ma_if, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_bp_debug_if, // @[functional-unit.scala:168:14]
input io_brupdate_b2_uop_bp_xcpt_if, // @[functional-unit.scala:168:14]
input [1:0] io_brupdate_b2_uop_debug_fsrc, // @[functional-unit.scala:168:14]
input [1:0] io_brupdate_b2_uop_debug_tsrc, // @[functional-unit.scala:168:14]
input io_brupdate_b2_valid, // @[functional-unit.scala:168:14]
input io_brupdate_b2_mispredict, // @[functional-unit.scala:168:14]
input io_brupdate_b2_taken, // @[functional-unit.scala:168:14]
input [2:0] io_brupdate_b2_cfi_type, // @[functional-unit.scala:168:14]
input [1:0] io_brupdate_b2_pc_sel, // @[functional-unit.scala:168:14]
input [39:0] io_brupdate_b2_jalr_target, // @[functional-unit.scala:168:14]
input [20:0] io_brupdate_b2_target_offset, // @[functional-unit.scala:168:14]
input io_status_debug, // @[functional-unit.scala:168:14]
input io_status_cease, // @[functional-unit.scala:168:14]
input io_status_wfi, // @[functional-unit.scala:168:14]
input [1:0] io_status_dprv, // @[functional-unit.scala:168:14]
input io_status_dv, // @[functional-unit.scala:168:14]
input [1:0] io_status_prv, // @[functional-unit.scala:168:14]
input io_status_v, // @[functional-unit.scala:168:14]
input io_status_sd, // @[functional-unit.scala:168:14]
input io_status_mpv, // @[functional-unit.scala:168:14]
input io_status_gva, // @[functional-unit.scala:168:14]
input io_status_tsr, // @[functional-unit.scala:168:14]
input io_status_tw, // @[functional-unit.scala:168:14]
input io_status_tvm, // @[functional-unit.scala:168:14]
input io_status_mxr, // @[functional-unit.scala:168:14]
input io_status_sum, // @[functional-unit.scala:168:14]
input io_status_mprv, // @[functional-unit.scala:168:14]
input [1:0] io_status_fs, // @[functional-unit.scala:168:14]
input [1:0] io_status_mpp, // @[functional-unit.scala:168:14]
input io_status_spp, // @[functional-unit.scala:168:14]
input io_status_mpie, // @[functional-unit.scala:168:14]
input io_status_spie, // @[functional-unit.scala:168:14]
input io_status_mie, // @[functional-unit.scala:168:14]
input io_status_sie // @[functional-unit.scala:168:14]
);
wire io_req_valid_0 = io_req_valid; // @[functional-unit.scala:482:7]
wire [6:0] io_req_bits_uop_uopc_0 = io_req_bits_uop_uopc; // @[functional-unit.scala:482:7]
wire [31:0] io_req_bits_uop_inst_0 = io_req_bits_uop_inst; // @[functional-unit.scala:482:7]
wire [31:0] io_req_bits_uop_debug_inst_0 = io_req_bits_uop_debug_inst; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_is_rvc_0 = io_req_bits_uop_is_rvc; // @[functional-unit.scala:482:7]
wire [39:0] io_req_bits_uop_debug_pc_0 = io_req_bits_uop_debug_pc; // @[functional-unit.scala:482:7]
wire [2:0] io_req_bits_uop_iq_type_0 = io_req_bits_uop_iq_type; // @[functional-unit.scala:482:7]
wire [9:0] io_req_bits_uop_fu_code_0 = io_req_bits_uop_fu_code; // @[functional-unit.scala:482:7]
wire [3:0] io_req_bits_uop_ctrl_br_type_0 = io_req_bits_uop_ctrl_br_type; // @[functional-unit.scala:482:7]
wire [1:0] io_req_bits_uop_ctrl_op1_sel_0 = io_req_bits_uop_ctrl_op1_sel; // @[functional-unit.scala:482:7]
wire [2:0] io_req_bits_uop_ctrl_op2_sel_0 = io_req_bits_uop_ctrl_op2_sel; // @[functional-unit.scala:482:7]
wire [2:0] io_req_bits_uop_ctrl_imm_sel_0 = io_req_bits_uop_ctrl_imm_sel; // @[functional-unit.scala:482:7]
wire [4:0] io_req_bits_uop_ctrl_op_fcn_0 = io_req_bits_uop_ctrl_op_fcn; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_ctrl_fcn_dw_0 = io_req_bits_uop_ctrl_fcn_dw; // @[functional-unit.scala:482:7]
wire [2:0] io_req_bits_uop_ctrl_csr_cmd_0 = io_req_bits_uop_ctrl_csr_cmd; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_ctrl_is_load_0 = io_req_bits_uop_ctrl_is_load; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_ctrl_is_sta_0 = io_req_bits_uop_ctrl_is_sta; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_ctrl_is_std_0 = io_req_bits_uop_ctrl_is_std; // @[functional-unit.scala:482:7]
wire [1:0] io_req_bits_uop_iw_state_0 = io_req_bits_uop_iw_state; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_iw_p1_poisoned_0 = io_req_bits_uop_iw_p1_poisoned; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_iw_p2_poisoned_0 = io_req_bits_uop_iw_p2_poisoned; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_is_br_0 = io_req_bits_uop_is_br; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_is_jalr_0 = io_req_bits_uop_is_jalr; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_is_jal_0 = io_req_bits_uop_is_jal; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_is_sfb_0 = io_req_bits_uop_is_sfb; // @[functional-unit.scala:482:7]
wire [7:0] io_req_bits_uop_br_mask_0 = io_req_bits_uop_br_mask; // @[functional-unit.scala:482:7]
wire [2:0] io_req_bits_uop_br_tag_0 = io_req_bits_uop_br_tag; // @[functional-unit.scala:482:7]
wire [3:0] io_req_bits_uop_ftq_idx_0 = io_req_bits_uop_ftq_idx; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_edge_inst_0 = io_req_bits_uop_edge_inst; // @[functional-unit.scala:482:7]
wire [5:0] io_req_bits_uop_pc_lob_0 = io_req_bits_uop_pc_lob; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_taken_0 = io_req_bits_uop_taken; // @[functional-unit.scala:482:7]
wire [19:0] io_req_bits_uop_imm_packed_0 = io_req_bits_uop_imm_packed; // @[functional-unit.scala:482:7]
wire [11:0] io_req_bits_uop_csr_addr_0 = io_req_bits_uop_csr_addr; // @[functional-unit.scala:482:7]
wire [4:0] io_req_bits_uop_rob_idx_0 = io_req_bits_uop_rob_idx; // @[functional-unit.scala:482:7]
wire [2:0] io_req_bits_uop_ldq_idx_0 = io_req_bits_uop_ldq_idx; // @[functional-unit.scala:482:7]
wire [2:0] io_req_bits_uop_stq_idx_0 = io_req_bits_uop_stq_idx; // @[functional-unit.scala:482:7]
wire [1:0] io_req_bits_uop_rxq_idx_0 = io_req_bits_uop_rxq_idx; // @[functional-unit.scala:482:7]
wire [5:0] io_req_bits_uop_pdst_0 = io_req_bits_uop_pdst; // @[functional-unit.scala:482:7]
wire [5:0] io_req_bits_uop_prs1_0 = io_req_bits_uop_prs1; // @[functional-unit.scala:482:7]
wire [5:0] io_req_bits_uop_prs2_0 = io_req_bits_uop_prs2; // @[functional-unit.scala:482:7]
wire [5:0] io_req_bits_uop_prs3_0 = io_req_bits_uop_prs3; // @[functional-unit.scala:482:7]
wire [3:0] io_req_bits_uop_ppred_0 = io_req_bits_uop_ppred; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_prs1_busy_0 = io_req_bits_uop_prs1_busy; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_prs2_busy_0 = io_req_bits_uop_prs2_busy; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_prs3_busy_0 = io_req_bits_uop_prs3_busy; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_ppred_busy_0 = io_req_bits_uop_ppred_busy; // @[functional-unit.scala:482:7]
wire [5:0] io_req_bits_uop_stale_pdst_0 = io_req_bits_uop_stale_pdst; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_exception_0 = io_req_bits_uop_exception; // @[functional-unit.scala:482:7]
wire [63:0] io_req_bits_uop_exc_cause_0 = io_req_bits_uop_exc_cause; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_bypassable_0 = io_req_bits_uop_bypassable; // @[functional-unit.scala:482:7]
wire [4:0] io_req_bits_uop_mem_cmd_0 = io_req_bits_uop_mem_cmd; // @[functional-unit.scala:482:7]
wire [1:0] io_req_bits_uop_mem_size_0 = io_req_bits_uop_mem_size; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_mem_signed_0 = io_req_bits_uop_mem_signed; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_is_fence_0 = io_req_bits_uop_is_fence; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_is_fencei_0 = io_req_bits_uop_is_fencei; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_is_amo_0 = io_req_bits_uop_is_amo; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_uses_ldq_0 = io_req_bits_uop_uses_ldq; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_uses_stq_0 = io_req_bits_uop_uses_stq; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_is_sys_pc2epc_0 = io_req_bits_uop_is_sys_pc2epc; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_is_unique_0 = io_req_bits_uop_is_unique; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_flush_on_commit_0 = io_req_bits_uop_flush_on_commit; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_ldst_is_rs1_0 = io_req_bits_uop_ldst_is_rs1; // @[functional-unit.scala:482:7]
wire [5:0] io_req_bits_uop_ldst_0 = io_req_bits_uop_ldst; // @[functional-unit.scala:482:7]
wire [5:0] io_req_bits_uop_lrs1_0 = io_req_bits_uop_lrs1; // @[functional-unit.scala:482:7]
wire [5:0] io_req_bits_uop_lrs2_0 = io_req_bits_uop_lrs2; // @[functional-unit.scala:482:7]
wire [5:0] io_req_bits_uop_lrs3_0 = io_req_bits_uop_lrs3; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_ldst_val_0 = io_req_bits_uop_ldst_val; // @[functional-unit.scala:482:7]
wire [1:0] io_req_bits_uop_dst_rtype_0 = io_req_bits_uop_dst_rtype; // @[functional-unit.scala:482:7]
wire [1:0] io_req_bits_uop_lrs1_rtype_0 = io_req_bits_uop_lrs1_rtype; // @[functional-unit.scala:482:7]
wire [1:0] io_req_bits_uop_lrs2_rtype_0 = io_req_bits_uop_lrs2_rtype; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_frs3_en_0 = io_req_bits_uop_frs3_en; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_fp_val_0 = io_req_bits_uop_fp_val; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_fp_single_0 = io_req_bits_uop_fp_single; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_xcpt_pf_if_0 = io_req_bits_uop_xcpt_pf_if; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_xcpt_ae_if_0 = io_req_bits_uop_xcpt_ae_if; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_xcpt_ma_if_0 = io_req_bits_uop_xcpt_ma_if; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_bp_debug_if_0 = io_req_bits_uop_bp_debug_if; // @[functional-unit.scala:482:7]
wire io_req_bits_uop_bp_xcpt_if_0 = io_req_bits_uop_bp_xcpt_if; // @[functional-unit.scala:482:7]
wire [1:0] io_req_bits_uop_debug_fsrc_0 = io_req_bits_uop_debug_fsrc; // @[functional-unit.scala:482:7]
wire [1:0] io_req_bits_uop_debug_tsrc_0 = io_req_bits_uop_debug_tsrc; // @[functional-unit.scala:482:7]
wire [64:0] io_req_bits_rs1_data_0 = io_req_bits_rs1_data; // @[functional-unit.scala:482:7]
wire [64:0] io_req_bits_rs2_data_0 = io_req_bits_rs2_data; // @[functional-unit.scala:482:7]
wire io_req_bits_kill_0 = io_req_bits_kill; // @[functional-unit.scala:482:7]
wire [7:0] io_brupdate_b1_resolve_mask_0 = io_brupdate_b1_resolve_mask; // @[functional-unit.scala:482:7]
wire [7:0] io_brupdate_b1_mispredict_mask_0 = io_brupdate_b1_mispredict_mask; // @[functional-unit.scala:482:7]
wire [6:0] io_brupdate_b2_uop_uopc_0 = io_brupdate_b2_uop_uopc; // @[functional-unit.scala:482:7]
wire [31:0] io_brupdate_b2_uop_inst_0 = io_brupdate_b2_uop_inst; // @[functional-unit.scala:482:7]
wire [31:0] io_brupdate_b2_uop_debug_inst_0 = io_brupdate_b2_uop_debug_inst; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_is_rvc_0 = io_brupdate_b2_uop_is_rvc; // @[functional-unit.scala:482:7]
wire [39:0] io_brupdate_b2_uop_debug_pc_0 = io_brupdate_b2_uop_debug_pc; // @[functional-unit.scala:482:7]
wire [2:0] io_brupdate_b2_uop_iq_type_0 = io_brupdate_b2_uop_iq_type; // @[functional-unit.scala:482:7]
wire [9:0] io_brupdate_b2_uop_fu_code_0 = io_brupdate_b2_uop_fu_code; // @[functional-unit.scala:482:7]
wire [3:0] io_brupdate_b2_uop_ctrl_br_type_0 = io_brupdate_b2_uop_ctrl_br_type; // @[functional-unit.scala:482:7]
wire [1:0] io_brupdate_b2_uop_ctrl_op1_sel_0 = io_brupdate_b2_uop_ctrl_op1_sel; // @[functional-unit.scala:482:7]
wire [2:0] io_brupdate_b2_uop_ctrl_op2_sel_0 = io_brupdate_b2_uop_ctrl_op2_sel; // @[functional-unit.scala:482:7]
wire [2:0] io_brupdate_b2_uop_ctrl_imm_sel_0 = io_brupdate_b2_uop_ctrl_imm_sel; // @[functional-unit.scala:482:7]
wire [4:0] io_brupdate_b2_uop_ctrl_op_fcn_0 = io_brupdate_b2_uop_ctrl_op_fcn; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_ctrl_fcn_dw_0 = io_brupdate_b2_uop_ctrl_fcn_dw; // @[functional-unit.scala:482:7]
wire [2:0] io_brupdate_b2_uop_ctrl_csr_cmd_0 = io_brupdate_b2_uop_ctrl_csr_cmd; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_ctrl_is_load_0 = io_brupdate_b2_uop_ctrl_is_load; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_ctrl_is_sta_0 = io_brupdate_b2_uop_ctrl_is_sta; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_ctrl_is_std_0 = io_brupdate_b2_uop_ctrl_is_std; // @[functional-unit.scala:482:7]
wire [1:0] io_brupdate_b2_uop_iw_state_0 = io_brupdate_b2_uop_iw_state; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_iw_p1_poisoned_0 = io_brupdate_b2_uop_iw_p1_poisoned; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_iw_p2_poisoned_0 = io_brupdate_b2_uop_iw_p2_poisoned; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_is_br_0 = io_brupdate_b2_uop_is_br; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_is_jalr_0 = io_brupdate_b2_uop_is_jalr; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_is_jal_0 = io_brupdate_b2_uop_is_jal; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_is_sfb_0 = io_brupdate_b2_uop_is_sfb; // @[functional-unit.scala:482:7]
wire [7:0] io_brupdate_b2_uop_br_mask_0 = io_brupdate_b2_uop_br_mask; // @[functional-unit.scala:482:7]
wire [2:0] io_brupdate_b2_uop_br_tag_0 = io_brupdate_b2_uop_br_tag; // @[functional-unit.scala:482:7]
wire [3:0] io_brupdate_b2_uop_ftq_idx_0 = io_brupdate_b2_uop_ftq_idx; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_edge_inst_0 = io_brupdate_b2_uop_edge_inst; // @[functional-unit.scala:482:7]
wire [5:0] io_brupdate_b2_uop_pc_lob_0 = io_brupdate_b2_uop_pc_lob; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_taken_0 = io_brupdate_b2_uop_taken; // @[functional-unit.scala:482:7]
wire [19:0] io_brupdate_b2_uop_imm_packed_0 = io_brupdate_b2_uop_imm_packed; // @[functional-unit.scala:482:7]
wire [11:0] io_brupdate_b2_uop_csr_addr_0 = io_brupdate_b2_uop_csr_addr; // @[functional-unit.scala:482:7]
wire [4:0] io_brupdate_b2_uop_rob_idx_0 = io_brupdate_b2_uop_rob_idx; // @[functional-unit.scala:482:7]
wire [2:0] io_brupdate_b2_uop_ldq_idx_0 = io_brupdate_b2_uop_ldq_idx; // @[functional-unit.scala:482:7]
wire [2:0] io_brupdate_b2_uop_stq_idx_0 = io_brupdate_b2_uop_stq_idx; // @[functional-unit.scala:482:7]
wire [1:0] io_brupdate_b2_uop_rxq_idx_0 = io_brupdate_b2_uop_rxq_idx; // @[functional-unit.scala:482:7]
wire [5:0] io_brupdate_b2_uop_pdst_0 = io_brupdate_b2_uop_pdst; // @[functional-unit.scala:482:7]
wire [5:0] io_brupdate_b2_uop_prs1_0 = io_brupdate_b2_uop_prs1; // @[functional-unit.scala:482:7]
wire [5:0] io_brupdate_b2_uop_prs2_0 = io_brupdate_b2_uop_prs2; // @[functional-unit.scala:482:7]
wire [5:0] io_brupdate_b2_uop_prs3_0 = io_brupdate_b2_uop_prs3; // @[functional-unit.scala:482:7]
wire [3:0] io_brupdate_b2_uop_ppred_0 = io_brupdate_b2_uop_ppred; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_prs1_busy_0 = io_brupdate_b2_uop_prs1_busy; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_prs2_busy_0 = io_brupdate_b2_uop_prs2_busy; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_prs3_busy_0 = io_brupdate_b2_uop_prs3_busy; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_ppred_busy_0 = io_brupdate_b2_uop_ppred_busy; // @[functional-unit.scala:482:7]
wire [5:0] io_brupdate_b2_uop_stale_pdst_0 = io_brupdate_b2_uop_stale_pdst; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_exception_0 = io_brupdate_b2_uop_exception; // @[functional-unit.scala:482:7]
wire [63:0] io_brupdate_b2_uop_exc_cause_0 = io_brupdate_b2_uop_exc_cause; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_bypassable_0 = io_brupdate_b2_uop_bypassable; // @[functional-unit.scala:482:7]
wire [4:0] io_brupdate_b2_uop_mem_cmd_0 = io_brupdate_b2_uop_mem_cmd; // @[functional-unit.scala:482:7]
wire [1:0] io_brupdate_b2_uop_mem_size_0 = io_brupdate_b2_uop_mem_size; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_mem_signed_0 = io_brupdate_b2_uop_mem_signed; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_is_fence_0 = io_brupdate_b2_uop_is_fence; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_is_fencei_0 = io_brupdate_b2_uop_is_fencei; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_is_amo_0 = io_brupdate_b2_uop_is_amo; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_uses_ldq_0 = io_brupdate_b2_uop_uses_ldq; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_uses_stq_0 = io_brupdate_b2_uop_uses_stq; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_is_sys_pc2epc_0 = io_brupdate_b2_uop_is_sys_pc2epc; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_is_unique_0 = io_brupdate_b2_uop_is_unique; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_flush_on_commit_0 = io_brupdate_b2_uop_flush_on_commit; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_ldst_is_rs1_0 = io_brupdate_b2_uop_ldst_is_rs1; // @[functional-unit.scala:482:7]
wire [5:0] io_brupdate_b2_uop_ldst_0 = io_brupdate_b2_uop_ldst; // @[functional-unit.scala:482:7]
wire [5:0] io_brupdate_b2_uop_lrs1_0 = io_brupdate_b2_uop_lrs1; // @[functional-unit.scala:482:7]
wire [5:0] io_brupdate_b2_uop_lrs2_0 = io_brupdate_b2_uop_lrs2; // @[functional-unit.scala:482:7]
wire [5:0] io_brupdate_b2_uop_lrs3_0 = io_brupdate_b2_uop_lrs3; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_ldst_val_0 = io_brupdate_b2_uop_ldst_val; // @[functional-unit.scala:482:7]
wire [1:0] io_brupdate_b2_uop_dst_rtype_0 = io_brupdate_b2_uop_dst_rtype; // @[functional-unit.scala:482:7]
wire [1:0] io_brupdate_b2_uop_lrs1_rtype_0 = io_brupdate_b2_uop_lrs1_rtype; // @[functional-unit.scala:482:7]
wire [1:0] io_brupdate_b2_uop_lrs2_rtype_0 = io_brupdate_b2_uop_lrs2_rtype; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_frs3_en_0 = io_brupdate_b2_uop_frs3_en; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_fp_val_0 = io_brupdate_b2_uop_fp_val; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_fp_single_0 = io_brupdate_b2_uop_fp_single; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_xcpt_pf_if_0 = io_brupdate_b2_uop_xcpt_pf_if; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_xcpt_ae_if_0 = io_brupdate_b2_uop_xcpt_ae_if; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_xcpt_ma_if_0 = io_brupdate_b2_uop_xcpt_ma_if; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_bp_debug_if_0 = io_brupdate_b2_uop_bp_debug_if; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_uop_bp_xcpt_if_0 = io_brupdate_b2_uop_bp_xcpt_if; // @[functional-unit.scala:482:7]
wire [1:0] io_brupdate_b2_uop_debug_fsrc_0 = io_brupdate_b2_uop_debug_fsrc; // @[functional-unit.scala:482:7]
wire [1:0] io_brupdate_b2_uop_debug_tsrc_0 = io_brupdate_b2_uop_debug_tsrc; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_valid_0 = io_brupdate_b2_valid; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_mispredict_0 = io_brupdate_b2_mispredict; // @[functional-unit.scala:482:7]
wire io_brupdate_b2_taken_0 = io_brupdate_b2_taken; // @[functional-unit.scala:482:7]
wire [2:0] io_brupdate_b2_cfi_type_0 = io_brupdate_b2_cfi_type; // @[functional-unit.scala:482:7]
wire [1:0] io_brupdate_b2_pc_sel_0 = io_brupdate_b2_pc_sel; // @[functional-unit.scala:482:7]
wire [39:0] io_brupdate_b2_jalr_target_0 = io_brupdate_b2_jalr_target; // @[functional-unit.scala:482:7]
wire [20:0] io_brupdate_b2_target_offset_0 = io_brupdate_b2_target_offset; // @[functional-unit.scala:482:7]
wire io_status_debug_0 = io_status_debug; // @[functional-unit.scala:482:7]
wire io_status_cease_0 = io_status_cease; // @[functional-unit.scala:482:7]
wire io_status_wfi_0 = io_status_wfi; // @[functional-unit.scala:482:7]
wire [1:0] io_status_dprv_0 = io_status_dprv; // @[functional-unit.scala:482:7]
wire io_status_dv_0 = io_status_dv; // @[functional-unit.scala:482:7]
wire [1:0] io_status_prv_0 = io_status_prv; // @[functional-unit.scala:482:7]
wire io_status_v_0 = io_status_v; // @[functional-unit.scala:482:7]
wire io_status_sd_0 = io_status_sd; // @[functional-unit.scala:482:7]
wire io_status_mpv_0 = io_status_mpv; // @[functional-unit.scala:482:7]
wire io_status_gva_0 = io_status_gva; // @[functional-unit.scala:482:7]
wire io_status_tsr_0 = io_status_tsr; // @[functional-unit.scala:482:7]
wire io_status_tw_0 = io_status_tw; // @[functional-unit.scala:482:7]
wire io_status_tvm_0 = io_status_tvm; // @[functional-unit.scala:482:7]
wire io_status_mxr_0 = io_status_mxr; // @[functional-unit.scala:482:7]
wire io_status_sum_0 = io_status_sum; // @[functional-unit.scala:482:7]
wire io_status_mprv_0 = io_status_mprv; // @[functional-unit.scala:482:7]
wire [1:0] io_status_fs_0 = io_status_fs; // @[functional-unit.scala:482:7]
wire [1:0] io_status_mpp_0 = io_status_mpp; // @[functional-unit.scala:482:7]
wire io_status_spp_0 = io_status_spp; // @[functional-unit.scala:482:7]
wire io_status_mpie_0 = io_status_mpie; // @[functional-unit.scala:482:7]
wire io_status_spie_0 = io_status_spie; // @[functional-unit.scala:482:7]
wire io_status_mie_0 = io_status_mie; // @[functional-unit.scala:482:7]
wire io_status_sie_0 = io_status_sie; // @[functional-unit.scala:482:7]
wire [31:0] io_status_isa = 32'h14112D; // @[functional-unit.scala:482:7]
wire [22:0] io_status_zero2 = 23'h0; // @[functional-unit.scala:482:7]
wire io_req_bits_pred_data = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_ready = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_predicated = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_valid = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_is_rvc = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_ctrl_fcn_dw = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_ctrl_is_load = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_ctrl_is_sta = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_ctrl_is_std = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_iw_p1_poisoned = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_iw_p2_poisoned = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_is_br = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_is_jalr = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_is_jal = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_is_sfb = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_edge_inst = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_taken = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_prs1_busy = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_prs2_busy = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_prs3_busy = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_ppred_busy = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_exception = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_bypassable = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_mem_signed = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_is_fence = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_is_fencei = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_is_amo = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_uses_ldq = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_uses_stq = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_is_sys_pc2epc = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_is_unique = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_flush_on_commit = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_ldst_is_rs1 = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_ldst_val = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_frs3_en = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_fp_val = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_fp_single = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_xcpt_pf_if = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_xcpt_ae_if = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_xcpt_ma_if = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_bp_debug_if = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_fflags_bits_uop_bp_xcpt_if = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_sfence_bits_hv = 1'h0; // @[functional-unit.scala:482:7]
wire io_resp_bits_sfence_bits_hg = 1'h0; // @[functional-unit.scala:482:7]
wire io_status_mbe = 1'h0; // @[functional-unit.scala:482:7]
wire io_status_sbe = 1'h0; // @[functional-unit.scala:482:7]
wire io_status_sd_rv32 = 1'h0; // @[functional-unit.scala:482:7]
wire io_status_ube = 1'h0; // @[functional-unit.scala:482:7]
wire io_status_upie = 1'h0; // @[functional-unit.scala:482:7]
wire io_status_hie = 1'h0; // @[functional-unit.scala:482:7]
wire io_status_uie = 1'h0; // @[functional-unit.scala:482:7]
wire _dbg_bp_T_1 = 1'h0; // @[functional-unit.scala:532:66]
wire _dbg_bp_T_3 = 1'h0; // @[functional-unit.scala:533:66]
wire _dbg_bp_T_4 = 1'h0; // @[functional-unit.scala:532:88]
wire dbg_bp = 1'h0; // @[functional-unit.scala:532:29]
wire _bp_T_1 = 1'h0; // @[functional-unit.scala:534:66]
wire _bp_T_3 = 1'h0; // @[functional-unit.scala:535:66]
wire _bp_T_4 = 1'h0; // @[functional-unit.scala:534:87]
wire bp = 1'h0; // @[functional-unit.scala:534:29]
wire [7:0] io_resp_bits_fflags_bits_uop_br_mask = 8'h0; // @[functional-unit.scala:482:7]
wire [7:0] io_status_zero1 = 8'h0; // @[functional-unit.scala:482:7]
wire [1:0] io_resp_bits_fflags_bits_uop_ctrl_op1_sel = 2'h0; // @[functional-unit.scala:482:7]
wire [1:0] io_resp_bits_fflags_bits_uop_iw_state = 2'h0; // @[functional-unit.scala:482:7]
wire [1:0] io_resp_bits_fflags_bits_uop_rxq_idx = 2'h0; // @[functional-unit.scala:482:7]
wire [1:0] io_resp_bits_fflags_bits_uop_mem_size = 2'h0; // @[functional-unit.scala:482:7]
wire [1:0] io_resp_bits_fflags_bits_uop_dst_rtype = 2'h0; // @[functional-unit.scala:482:7]
wire [1:0] io_resp_bits_fflags_bits_uop_lrs1_rtype = 2'h0; // @[functional-unit.scala:482:7]
wire [1:0] io_resp_bits_fflags_bits_uop_lrs2_rtype = 2'h0; // @[functional-unit.scala:482:7]
wire [1:0] io_resp_bits_fflags_bits_uop_debug_fsrc = 2'h0; // @[functional-unit.scala:482:7]
wire [1:0] io_resp_bits_fflags_bits_uop_debug_tsrc = 2'h0; // @[functional-unit.scala:482:7]
wire [1:0] io_status_xs = 2'h0; // @[functional-unit.scala:482:7]
wire [1:0] io_status_vs = 2'h0; // @[functional-unit.scala:482:7]
wire io_req_ready = 1'h1; // @[functional-unit.scala:482:7]
wire [64:0] io_req_bits_rs3_data = 65'h0; // @[functional-unit.scala:482:7]
wire [6:0] io_resp_bits_fflags_bits_uop_uopc = 7'h0; // @[functional-unit.scala:482:7]
wire [31:0] io_resp_bits_fflags_bits_uop_inst = 32'h0; // @[functional-unit.scala:482:7]
wire [31:0] io_resp_bits_fflags_bits_uop_debug_inst = 32'h0; // @[functional-unit.scala:482:7]
wire [39:0] io_resp_bits_fflags_bits_uop_debug_pc = 40'h0; // @[functional-unit.scala:482:7]
wire [2:0] io_resp_bits_fflags_bits_uop_iq_type = 3'h0; // @[functional-unit.scala:482:7]
wire [2:0] io_resp_bits_fflags_bits_uop_ctrl_op2_sel = 3'h0; // @[functional-unit.scala:482:7]
wire [2:0] io_resp_bits_fflags_bits_uop_ctrl_imm_sel = 3'h0; // @[functional-unit.scala:482:7]
wire [2:0] io_resp_bits_fflags_bits_uop_ctrl_csr_cmd = 3'h0; // @[functional-unit.scala:482:7]
wire [2:0] io_resp_bits_fflags_bits_uop_br_tag = 3'h0; // @[functional-unit.scala:482:7]
wire [2:0] io_resp_bits_fflags_bits_uop_ldq_idx = 3'h0; // @[functional-unit.scala:482:7]
wire [2:0] io_resp_bits_fflags_bits_uop_stq_idx = 3'h0; // @[functional-unit.scala:482:7]
wire [9:0] io_resp_bits_fflags_bits_uop_fu_code = 10'h0; // @[functional-unit.scala:482:7]
wire [3:0] io_resp_bits_fflags_bits_uop_ctrl_br_type = 4'h0; // @[functional-unit.scala:482:7]
wire [3:0] io_resp_bits_fflags_bits_uop_ftq_idx = 4'h0; // @[functional-unit.scala:482:7]
wire [3:0] io_resp_bits_fflags_bits_uop_ppred = 4'h0; // @[functional-unit.scala:482:7]
wire [4:0] io_resp_bits_fflags_bits_uop_ctrl_op_fcn = 5'h0; // @[functional-unit.scala:482:7]
wire [4:0] io_resp_bits_fflags_bits_uop_rob_idx = 5'h0; // @[functional-unit.scala:482:7]
wire [4:0] io_resp_bits_fflags_bits_uop_mem_cmd = 5'h0; // @[functional-unit.scala:482:7]
wire [4:0] io_resp_bits_fflags_bits_flags = 5'h0; // @[functional-unit.scala:482:7]
wire [5:0] io_resp_bits_fflags_bits_uop_pc_lob = 6'h0; // @[functional-unit.scala:482:7]
wire [5:0] io_resp_bits_fflags_bits_uop_pdst = 6'h0; // @[functional-unit.scala:482:7]
wire [5:0] io_resp_bits_fflags_bits_uop_prs1 = 6'h0; // @[functional-unit.scala:482:7]
wire [5:0] io_resp_bits_fflags_bits_uop_prs2 = 6'h0; // @[functional-unit.scala:482:7]
wire [5:0] io_resp_bits_fflags_bits_uop_prs3 = 6'h0; // @[functional-unit.scala:482:7]
wire [5:0] io_resp_bits_fflags_bits_uop_stale_pdst = 6'h0; // @[functional-unit.scala:482:7]
wire [5:0] io_resp_bits_fflags_bits_uop_ldst = 6'h0; // @[functional-unit.scala:482:7]
wire [5:0] io_resp_bits_fflags_bits_uop_lrs1 = 6'h0; // @[functional-unit.scala:482:7]
wire [5:0] io_resp_bits_fflags_bits_uop_lrs2 = 6'h0; // @[functional-unit.scala:482:7]
wire [5:0] io_resp_bits_fflags_bits_uop_lrs3 = 6'h0; // @[functional-unit.scala:482:7]
wire [19:0] io_resp_bits_fflags_bits_uop_imm_packed = 20'h0; // @[functional-unit.scala:482:7]
wire [11:0] io_resp_bits_fflags_bits_uop_csr_addr = 12'h0; // @[functional-unit.scala:482:7]
wire [63:0] io_resp_bits_fflags_bits_uop_exc_cause = 64'h0; // @[functional-unit.scala:482:7]
wire [1:0] io_status_sxl = 2'h2; // @[functional-unit.scala:482:7]
wire [1:0] io_status_uxl = 2'h2; // @[functional-unit.scala:482:7]
wire [6:0] io_resp_bits_uop_uopc_0 = io_req_bits_uop_uopc_0; // @[functional-unit.scala:482:7]
wire [31:0] io_resp_bits_uop_inst_0 = io_req_bits_uop_inst_0; // @[functional-unit.scala:482:7]
wire [31:0] io_resp_bits_uop_debug_inst_0 = io_req_bits_uop_debug_inst_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_is_rvc_0 = io_req_bits_uop_is_rvc_0; // @[functional-unit.scala:482:7]
wire [39:0] io_resp_bits_uop_debug_pc_0 = io_req_bits_uop_debug_pc_0; // @[functional-unit.scala:482:7]
wire [2:0] io_resp_bits_uop_iq_type_0 = io_req_bits_uop_iq_type_0; // @[functional-unit.scala:482:7]
wire [9:0] io_resp_bits_uop_fu_code_0 = io_req_bits_uop_fu_code_0; // @[functional-unit.scala:482:7]
wire [3:0] io_resp_bits_uop_ctrl_br_type_0 = io_req_bits_uop_ctrl_br_type_0; // @[functional-unit.scala:482:7]
wire [1:0] io_resp_bits_uop_ctrl_op1_sel_0 = io_req_bits_uop_ctrl_op1_sel_0; // @[functional-unit.scala:482:7]
wire [2:0] io_resp_bits_uop_ctrl_op2_sel_0 = io_req_bits_uop_ctrl_op2_sel_0; // @[functional-unit.scala:482:7]
wire [2:0] io_resp_bits_uop_ctrl_imm_sel_0 = io_req_bits_uop_ctrl_imm_sel_0; // @[functional-unit.scala:482:7]
wire [4:0] io_resp_bits_uop_ctrl_op_fcn_0 = io_req_bits_uop_ctrl_op_fcn_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_ctrl_fcn_dw_0 = io_req_bits_uop_ctrl_fcn_dw_0; // @[functional-unit.scala:482:7]
wire [2:0] io_resp_bits_uop_ctrl_csr_cmd_0 = io_req_bits_uop_ctrl_csr_cmd_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_ctrl_is_load_0 = io_req_bits_uop_ctrl_is_load_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_ctrl_is_sta_0 = io_req_bits_uop_ctrl_is_sta_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_ctrl_is_std_0 = io_req_bits_uop_ctrl_is_std_0; // @[functional-unit.scala:482:7]
wire [1:0] io_resp_bits_uop_iw_state_0 = io_req_bits_uop_iw_state_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_iw_p1_poisoned_0 = io_req_bits_uop_iw_p1_poisoned_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_iw_p2_poisoned_0 = io_req_bits_uop_iw_p2_poisoned_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_is_br_0 = io_req_bits_uop_is_br_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_is_jalr_0 = io_req_bits_uop_is_jalr_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_is_jal_0 = io_req_bits_uop_is_jal_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_is_sfb_0 = io_req_bits_uop_is_sfb_0; // @[functional-unit.scala:482:7]
wire [2:0] io_resp_bits_uop_br_tag_0 = io_req_bits_uop_br_tag_0; // @[functional-unit.scala:482:7]
wire [3:0] io_resp_bits_uop_ftq_idx_0 = io_req_bits_uop_ftq_idx_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_edge_inst_0 = io_req_bits_uop_edge_inst_0; // @[functional-unit.scala:482:7]
wire [5:0] io_resp_bits_uop_pc_lob_0 = io_req_bits_uop_pc_lob_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_taken_0 = io_req_bits_uop_taken_0; // @[functional-unit.scala:482:7]
wire [19:0] io_resp_bits_uop_imm_packed_0 = io_req_bits_uop_imm_packed_0; // @[functional-unit.scala:482:7]
wire [11:0] io_resp_bits_uop_csr_addr_0 = io_req_bits_uop_csr_addr_0; // @[functional-unit.scala:482:7]
wire [4:0] io_resp_bits_uop_rob_idx_0 = io_req_bits_uop_rob_idx_0; // @[functional-unit.scala:482:7]
wire [2:0] io_resp_bits_uop_ldq_idx_0 = io_req_bits_uop_ldq_idx_0; // @[functional-unit.scala:482:7]
wire [2:0] io_resp_bits_uop_stq_idx_0 = io_req_bits_uop_stq_idx_0; // @[functional-unit.scala:482:7]
wire [1:0] io_resp_bits_uop_rxq_idx_0 = io_req_bits_uop_rxq_idx_0; // @[functional-unit.scala:482:7]
wire [5:0] io_resp_bits_uop_pdst_0 = io_req_bits_uop_pdst_0; // @[functional-unit.scala:482:7]
wire [5:0] io_resp_bits_uop_prs1_0 = io_req_bits_uop_prs1_0; // @[functional-unit.scala:482:7]
wire [5:0] io_resp_bits_uop_prs2_0 = io_req_bits_uop_prs2_0; // @[functional-unit.scala:482:7]
wire [5:0] io_resp_bits_uop_prs3_0 = io_req_bits_uop_prs3_0; // @[functional-unit.scala:482:7]
wire [3:0] io_resp_bits_uop_ppred_0 = io_req_bits_uop_ppred_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_prs1_busy_0 = io_req_bits_uop_prs1_busy_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_prs2_busy_0 = io_req_bits_uop_prs2_busy_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_prs3_busy_0 = io_req_bits_uop_prs3_busy_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_ppred_busy_0 = io_req_bits_uop_ppred_busy_0; // @[functional-unit.scala:482:7]
wire [5:0] io_resp_bits_uop_stale_pdst_0 = io_req_bits_uop_stale_pdst_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_exception_0 = io_req_bits_uop_exception_0; // @[functional-unit.scala:482:7]
wire [63:0] io_resp_bits_uop_exc_cause_0 = io_req_bits_uop_exc_cause_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_bypassable_0 = io_req_bits_uop_bypassable_0; // @[functional-unit.scala:482:7]
wire [4:0] io_resp_bits_uop_mem_cmd_0 = io_req_bits_uop_mem_cmd_0; // @[functional-unit.scala:482:7]
wire [1:0] io_resp_bits_uop_mem_size_0 = io_req_bits_uop_mem_size_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_mem_signed_0 = io_req_bits_uop_mem_signed_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_is_fence_0 = io_req_bits_uop_is_fence_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_is_fencei_0 = io_req_bits_uop_is_fencei_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_is_amo_0 = io_req_bits_uop_is_amo_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_uses_ldq_0 = io_req_bits_uop_uses_ldq_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_uses_stq_0 = io_req_bits_uop_uses_stq_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_is_sys_pc2epc_0 = io_req_bits_uop_is_sys_pc2epc_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_is_unique_0 = io_req_bits_uop_is_unique_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_flush_on_commit_0 = io_req_bits_uop_flush_on_commit_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_ldst_is_rs1_0 = io_req_bits_uop_ldst_is_rs1_0; // @[functional-unit.scala:482:7]
wire [5:0] io_resp_bits_uop_ldst_0 = io_req_bits_uop_ldst_0; // @[functional-unit.scala:482:7]
wire [5:0] io_resp_bits_uop_lrs1_0 = io_req_bits_uop_lrs1_0; // @[functional-unit.scala:482:7]
wire [5:0] io_resp_bits_uop_lrs2_0 = io_req_bits_uop_lrs2_0; // @[functional-unit.scala:482:7]
wire [5:0] io_resp_bits_uop_lrs3_0 = io_req_bits_uop_lrs3_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_ldst_val_0 = io_req_bits_uop_ldst_val_0; // @[functional-unit.scala:482:7]
wire [1:0] io_resp_bits_uop_dst_rtype_0 = io_req_bits_uop_dst_rtype_0; // @[functional-unit.scala:482:7]
wire [1:0] io_resp_bits_uop_lrs1_rtype_0 = io_req_bits_uop_lrs1_rtype_0; // @[functional-unit.scala:482:7]
wire [1:0] io_resp_bits_uop_lrs2_rtype_0 = io_req_bits_uop_lrs2_rtype_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_frs3_en_0 = io_req_bits_uop_frs3_en_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_fp_val_0 = io_req_bits_uop_fp_val_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_fp_single_0 = io_req_bits_uop_fp_single_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_xcpt_pf_if_0 = io_req_bits_uop_xcpt_pf_if_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_xcpt_ae_if_0 = io_req_bits_uop_xcpt_ae_if_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_xcpt_ma_if_0 = io_req_bits_uop_xcpt_ma_if_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_bp_debug_if_0 = io_req_bits_uop_bp_debug_if_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_uop_bp_xcpt_if_0 = io_req_bits_uop_bp_xcpt_if_0; // @[functional-unit.scala:482:7]
wire [1:0] io_resp_bits_uop_debug_fsrc_0 = io_req_bits_uop_debug_fsrc_0; // @[functional-unit.scala:482:7]
wire [1:0] io_resp_bits_uop_debug_tsrc_0 = io_req_bits_uop_debug_tsrc_0; // @[functional-unit.scala:482:7]
wire [64:0] _sum_T = io_req_bits_rs1_data_0; // @[functional-unit.scala:482:7, :493:35]
wire [64:0] io_resp_bits_data_0 = io_req_bits_rs2_data_0; // @[functional-unit.scala:482:7]
wire _io_resp_valid_T_3; // @[functional-unit.scala:276:38]
wire [7:0] _io_resp_bits_uop_br_mask_T_1; // @[util.scala:85:25]
wire [39:0] effective_address; // @[functional-unit.scala:496:30]
wire xcpt_val; // @[functional-unit.scala:538:26]
wire _io_resp_bits_sfence_valid_T_1; // @[functional-unit.scala:549:45]
wire _io_resp_bits_sfence_bits_rs1_T; // @[functional-unit.scala:550:59]
wire _io_resp_bits_sfence_bits_rs2_T; // @[functional-unit.scala:551:59]
wire [7:0] io_resp_bits_uop_br_mask_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_mxcpt_valid_0; // @[functional-unit.scala:482:7]
wire [24:0] io_resp_bits_mxcpt_bits_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_sfence_bits_rs1_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_sfence_bits_rs2_0; // @[functional-unit.scala:482:7]
wire [38:0] io_resp_bits_sfence_bits_addr_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_sfence_bits_asid_0; // @[functional-unit.scala:482:7]
wire io_resp_bits_sfence_valid_0; // @[functional-unit.scala:482:7]
wire [39:0] io_resp_bits_addr_0; // @[functional-unit.scala:482:7]
wire io_resp_valid_0; // @[functional-unit.scala:482:7]
wire [7:0] _io_resp_valid_T = io_brupdate_b1_mispredict_mask_0 & io_req_bits_uop_br_mask_0; // @[util.scala:118:51]
wire _io_resp_valid_T_1 = |_io_resp_valid_T; // @[util.scala:118:{51,59}]
wire _io_resp_valid_T_2 = ~_io_resp_valid_T_1; // @[util.scala:118:59]
assign _io_resp_valid_T_3 = io_req_valid_0 & _io_resp_valid_T_2; // @[functional-unit.scala:276:{38,41}, :482:7]
assign io_resp_valid_0 = _io_resp_valid_T_3; // @[functional-unit.scala:276:38, :482:7]
wire [7:0] _io_resp_bits_uop_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:85:27]
assign _io_resp_bits_uop_br_mask_T_1 = io_req_bits_uop_br_mask_0 & _io_resp_bits_uop_br_mask_T; // @[util.scala:85:{25,27}]
assign io_resp_bits_uop_br_mask_0 = _io_resp_bits_uop_br_mask_T_1; // @[util.scala:85:25]
wire [11:0] _sum_T_1 = io_req_bits_uop_imm_packed_0[19:8]; // @[functional-unit.scala:482:7, :493:70]
wire [11:0] _sum_T_2 = _sum_T_1; // @[functional-unit.scala:493:{70,77}]
wire [65:0] _sum_T_3 = {_sum_T[64], _sum_T} + {{54{_sum_T_2[11]}}, _sum_T_2}; // @[functional-unit.scala:493:{35,42,77}]
wire [64:0] _sum_T_4 = _sum_T_3[64:0]; // @[functional-unit.scala:493:42]
wire [64:0] _sum_T_5 = _sum_T_4; // @[functional-unit.scala:493:42]
wire [64:0] sum = _sum_T_5; // @[functional-unit.scala:493:{42,85}]
wire _ea_sign_T = sum[38]; // @[functional-unit.scala:493:85, :494:24]
wire [24:0] _ea_sign_T_1 = sum[63:39]; // @[functional-unit.scala:493:85, :494:43]
wire [24:0] _ea_sign_T_4 = sum[63:39]; // @[functional-unit.scala:493:85, :494:43, :495:43]
wire [24:0] _ea_sign_T_2 = ~_ea_sign_T_1; // @[functional-unit.scala:494:{39,43}]
wire _ea_sign_T_3 = _ea_sign_T_2 == 25'h0; // @[functional-unit.scala:494:{39,58}]
wire _ea_sign_T_5 = |_ea_sign_T_4; // @[functional-unit.scala:495:{43,58}]
wire ea_sign = _ea_sign_T ? _ea_sign_T_3 : _ea_sign_T_5; // @[functional-unit.scala:494:{20,24,58}, :495:58]
wire [38:0] _effective_address_T = sum[38:0]; // @[functional-unit.scala:493:85, :496:43]
assign effective_address = {ea_sign, _effective_address_T}; // @[functional-unit.scala:494:20, :496:{30,43}]
assign io_resp_bits_addr_0 = effective_address; // @[functional-unit.scala:482:7, :496:30]
wire _misaligned_T = io_req_bits_uop_mem_size_0 == 2'h1; // @[functional-unit.scala:482:7, :518:11]
wire _misaligned_T_1 = effective_address[0]; // @[functional-unit.scala:496:30, :518:40]
wire _misaligned_T_2 = _misaligned_T_1; // @[functional-unit.scala:518:{40,44}]
wire _misaligned_T_3 = _misaligned_T & _misaligned_T_2; // @[functional-unit.scala:518:{11,19,44}]
wire _misaligned_T_4 = io_req_bits_uop_mem_size_0 == 2'h2; // @[functional-unit.scala:482:7, :519:11]
wire [1:0] _misaligned_T_5 = effective_address[1:0]; // @[functional-unit.scala:496:30, :519:40]
wire _misaligned_T_6 = |_misaligned_T_5; // @[functional-unit.scala:519:{40,46}]
wire _misaligned_T_7 = _misaligned_T_4 & _misaligned_T_6; // @[functional-unit.scala:519:{11,19,46}]
wire _misaligned_T_8 = _misaligned_T_3 | _misaligned_T_7; // @[functional-unit.scala:518:{19,54}, :519:19]
wire _misaligned_T_9 = &io_req_bits_uop_mem_size_0; // @[functional-unit.scala:482:7, :520:11]
wire [2:0] _misaligned_T_10 = effective_address[2:0]; // @[functional-unit.scala:496:30, :520:40]
wire _misaligned_T_11 = |_misaligned_T_10; // @[functional-unit.scala:520:{40,46}]
wire _misaligned_T_12 = _misaligned_T_9 & _misaligned_T_11; // @[functional-unit.scala:520:{11,19,46}]
wire misaligned = _misaligned_T_8 | _misaligned_T_12; // @[functional-unit.scala:518:54, :519:56, :520:19]
wire _GEN = io_req_bits_uop_uopc_0 == 7'h1; // @[functional-unit.scala:482:7, :530:53]
wire _ma_ld_T; // @[functional-unit.scala:530:53]
assign _ma_ld_T = _GEN; // @[functional-unit.scala:530:53]
wire _dbg_bp_T; // @[functional-unit.scala:532:55]
assign _dbg_bp_T = _GEN; // @[functional-unit.scala:530:53, :532:55]
wire _bp_T; // @[functional-unit.scala:534:55]
assign _bp_T = _GEN; // @[functional-unit.scala:530:53, :534:55]
wire _ma_ld_T_1 = io_req_valid_0 & _ma_ld_T; // @[functional-unit.scala:482:7, :530:{29,53}]
wire ma_ld = _ma_ld_T_1 & misaligned; // @[functional-unit.scala:519:56, :530:{29,63}]
wire _GEN_0 = io_req_bits_uop_uopc_0 == 7'h2; // @[functional-unit.scala:482:7, :531:54]
wire _ma_st_T; // @[functional-unit.scala:531:54]
assign _ma_st_T = _GEN_0; // @[functional-unit.scala:531:54]
wire _dbg_bp_T_2; // @[functional-unit.scala:533:55]
assign _dbg_bp_T_2 = _GEN_0; // @[functional-unit.scala:531:54, :533:55]
wire _bp_T_2; // @[functional-unit.scala:535:55]
assign _bp_T_2 = _GEN_0; // @[functional-unit.scala:531:54, :535:55]
wire _ma_st_T_1 = io_req_bits_uop_uopc_0 == 7'h43; // @[functional-unit.scala:482:7, :531:89]
wire _ma_st_T_2 = _ma_st_T | _ma_st_T_1; // @[functional-unit.scala:531:{54,65,89}]
wire _ma_st_T_3 = io_req_valid_0 & _ma_st_T_2; // @[functional-unit.scala:482:7, :531:{29,65}]
wire ma_st = _ma_st_T_3 & misaligned; // @[functional-unit.scala:519:56, :531:{29,104}]
assign xcpt_val = ma_ld | ma_st; // @[functional-unit.scala:530:63, :531:104, :538:26]
assign io_resp_bits_mxcpt_valid_0 = xcpt_val; // @[functional-unit.scala:482:7, :538:26]
wire [3:0] xcpt_cause = ma_ld ? 4'h4 : ma_st ? 4'h6 : 4'h3; // @[Mux.scala:50:70]
assign io_resp_bits_mxcpt_bits_0 = {21'h0, xcpt_cause}; // @[Mux.scala:50:70] |
Generate the Verilog code corresponding to the following Chisel files.
File SegmentedMultiplyPipe.scala:
package saturn.exu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util._
import freechips.rocketchip.tile._
import saturn.common._
import saturn.insns._
case class IntegerMultiplyFactory(depth: Int, segmented: Boolean) extends FunctionalUnitFactory {
def base_insns = Seq(
MUL.VV, MUL.VX, MULH.VV, MULH.VX,
MULHU.VV, MULHU.VX, MULHSU.VV, MULHSU.VX,
WMUL.VV, WMUL.VX, WMULU.VV, WMULU.VX,
WMULSU.VV, WMULSU.VX,
MACC.VV, MACC.VX, NMSAC.VV, NMSAC.VX,
MADD.VV, MADD.VX, NMSUB.VV, NMSUB.VX,
WMACC.VV, WMACC.VX, WMACCU.VV, WMACCU.VX,
WMACCSU.VV, WMACCSU.VX, WMACCUS.VV, WMACCUS.VX,
SMUL.VV, SMUL.VX)
def insns = if (segmented) base_insns else base_insns.map(_.elementWise)
def generate(implicit p: Parameters) = if (segmented) {
new SegmentedMultiplyPipe(depth)(p)
} else {
new ElementwiseMultiplyPipe(depth)(p)
}
}
class SegmentedMultiplyPipe(depth: Int)(implicit p: Parameters) extends PipelinedFunctionalUnit(depth)(p) {
val supported_insns = IntegerMultiplyFactory(depth, true).insns
io.iss.ready := new VectorDecoder(io.iss.op.funct3, io.iss.op.funct6, 0.U, 0.U, supported_insns, Nil).matched
io.set_vxsat := false.B
io.set_fflags.valid := false.B
io.set_fflags.bits := DontCare
val ctrl = new VectorDecoder(io.pipe(0).bits.funct3, io.pipe(0).bits.funct6, 0.U, 0.U, supported_insns, Seq(
MULHi, MULSign1, MULSign2, MULSwapVdV2, MULAccumulate, MULSub))
val in_eew = io.pipe(0).bits.rvs1_eew
val out_eew = io.pipe(0).bits.vd_eew
val in_vs1 = io.pipe(0).bits.rvs1_data
val in_vs2 = io.pipe(0).bits.rvs2_data
val in_vd = io.pipe(0).bits.rvd_data
val mul_in1 = in_vs1
val mul_in2 = Mux(ctrl.bool(MULSwapVdV2), in_vd, in_vs2)
val multipliers = Seq.fill(dLenB >> 3)(Module(new MultiplyBlock))
for (i <- 0 until (dLenB >> 3)) {
multipliers(i).io.in1_signed := ctrl.bool(MULSign1)
multipliers(i).io.in2_signed := ctrl.bool(MULSign2)
multipliers(i).io.eew := io.pipe(0).bits.rvs1_eew
multipliers(i).io.in1 := mul_in1.asTypeOf(Vec(dLenB >> 3, UInt(64.W)))(i)
multipliers(i).io.in2 := mul_in2.asTypeOf(Vec(dLenB >> 3, UInt(64.W)))(i)
}
val mul_out_comb = VecInit(multipliers.map(_.io.out_data)).asUInt
////////////////////////////////////////////////////////////////////////////////////////////
// Pipeline Stages Before Adder Array
////////////////////////////////////////////////////////////////////////////////////////////
val mul_out = Pipe(io.pipe(0).valid, mul_out_comb, depth-2).bits
val in_eew_pipe = io.pipe(depth-2).bits.rvs1_eew
val out_eew_pipe = io.pipe(depth-2).bits.vd_eew
val ctrl_wmul = out_eew_pipe > in_eew_pipe
val ctrl_smul = io.pipe(depth-2).bits.isOpi
val ctrl_MULSub = Pipe(io.pipe(0).valid, ctrl.bool(MULSub), depth-2).bits
val ctrl_MULSwapVdV2 = Pipe(io.pipe(0).valid, ctrl.bool(MULSwapVdV2), depth-2).bits
val ctrl_MULAccumulate = Pipe(io.pipe(0).valid, ctrl.bool(MULAccumulate), depth-2).bits
val ctrl_MULHi = Pipe(io.pipe(0).valid, ctrl.bool(MULHi), depth-2).bits
val in_vs2_pipe = io.pipe(depth-2).bits.rvs2_data
val in_vd_pipe = io.pipe(depth-2).bits.rvd_data
////////////////////////////////////////////////////////////////////////////////////////////
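// Deinterleave the 2*dLen-bit segmented products into their per-element high and low
// halves; the element width of the current instruction selects which view is used below.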
val hi = VecInit.tabulate(4)({sew =>
VecInit(mul_out.asTypeOf(Vec((2*dLenB) >> sew, UInt((8 << sew).W))).grouped(2).map(_.last).toSeq).asUInt
})(in_eew_pipe)
val lo = VecInit.tabulate(4)({sew =>
VecInit(mul_out.asTypeOf(Vec((2*dLenB) >> sew, UInt((8 << sew).W))).grouped(2).map(_.head).toSeq).asUInt
})(in_eew_pipe)
val half_sel = (io.pipe(depth-2).bits.eidx >> (dLenOffBits.U - out_eew_pipe))(0)
val wide = Mux(half_sel, mul_out >> dLen, mul_out)(dLen-1,0)
val (smul_clipped, smul_sat) = {
val smul_arr = Module(new VectorSMul)
smul_arr.io.mul_in := mul_out
smul_arr.io.eew := out_eew_pipe
smul_arr.io.vxrm := io.pipe(depth-2).bits.vxrm
(smul_arr.io.clipped, smul_arr.io.sat)
}
val adder_arr = Module(new AdderArray(dLenB))
adder_arr.io.in1 := Mux(ctrl_wmul, wide, lo).asTypeOf(Vec(dLenB, UInt(8.W)))
adder_arr.io.in2 := Mux(ctrl_MULAccumulate, Mux(ctrl_MULSwapVdV2, in_vs2_pipe, in_vd_pipe), 0.U(dLen.W)).asTypeOf(Vec(dLenB, UInt(8.W)))
adder_arr.io.incr := VecInit.fill(dLenB)(false.B)
adder_arr.io.mask_carry := 0.U
adder_arr.io.signed := DontCare
adder_arr.io.eew := out_eew_pipe
adder_arr.io.avg := false.B
adder_arr.io.rm := DontCare
adder_arr.io.sub := ctrl_MULSub
adder_arr.io.cmask := false.B
val add_out = adder_arr.io.out
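// One-hot select between the three result paths: the saturating SMUL clip, the high
// product half for MULH*, and the adder-array output (with optional accumulate) otherwise.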
val out = Mux(ctrl_smul, smul_clipped, 0.U) | Mux(ctrl_MULHi, hi, 0.U) | Mux(!ctrl_smul && !ctrl_MULHi, add_out.asUInt, 0.U)
val pipe_out = Pipe(io.pipe(depth-2).valid, out, 1).bits
val vxsat = Mux(ctrl_smul, smul_sat, 0.U) & io.pipe(depth-2).bits.wmask
val pipe_vxsat = Pipe(io.pipe(depth-2).valid, vxsat, 1).bits
io.pipe0_stall := false.B
io.write.valid := io.pipe(depth-1).valid
io.write.bits.eg := io.pipe(depth-1).bits.wvd_eg
io.write.bits.data := pipe_out
io.write.bits.mask := FillInterleaved(8, io.pipe(depth-1).bits.wmask)
io.set_vxsat := io.pipe(depth-1).valid && (pipe_vxsat =/= 0.U)
io.scalar_write.valid := false.B
io.scalar_write.bits := DontCare
}
class MultiplyBlock extends Module {
val xLen = 64
val io = IO(new Bundle {
val in1_signed = Input(Bool())
val in2_signed = Input(Bool())
val eew = Input(UInt(2.W))
val in1 = Input(UInt(xLen.W))
val in2 = Input(UInt(xLen.W))
val out_data = Output(UInt((2*xLen).W))
})
val mul64 = Module(new Multiplier(64))
mul64.io.in1_signed := io.in1_signed
mul64.io.in2_signed := io.in2_signed
mul64.io.in1 := io.in1
mul64.io.in2 := io.in2
val mul32 = Module(new Multiplier(32))
mul32.io.in1_signed := io.in1_signed
mul32.io.in2_signed := io.in2_signed
mul32.io.in1 := io.in1(63,32)
mul32.io.in2 := io.in2(63,32)
val mul16 = Seq.tabulate(2) { i =>
val indh = 32*(i+1) - 1
val indl = 32*i + 16
val in1 = io.in1(indh, indl)
val in2 = io.in2(indh, indl)
val mul = Module(new Multiplier(16))
mul.io.in1_signed := io.in1_signed
mul.io.in2_signed := io.in2_signed
mul.io.in1 := in1
mul.io.in2 := in2
mul
}
val mul8 = Seq.tabulate(4) { i =>
val indh = 16*(i+1) - 1
val indl = 16*i + 8
val in1 = io.in1(indh, indl)
val in2 = io.in2(indh, indl)
val mul = Module(new Multiplier(8))
mul.io.in1_signed := io.in1_signed
mul.io.in2_signed := io.in2_signed
mul.io.in1 := in1
mul.io.in2 := in2
mul
}
when (io.eew === 0.U) {
mul16(1).io.in1 := Cat(Fill(8, io.in1_signed && io.in1(55)), io.in1(55, 48))
mul16(1).io.in2 := Cat(Fill(8, io.in2_signed && io.in2(55)), io.in2(55, 48))
mul32.io.in1 := Cat(Fill(8, io.in1_signed && io.in1(39)), io.in1(39, 32))
mul32.io.in2 := Cat(Fill(8, io.in2_signed && io.in2(39)), io.in2(39, 32))
mul16(0).io.in1 := Cat(Fill(8, io.in1_signed && io.in1(23)), io.in1(23, 16))
mul16(0).io.in2 := Cat(Fill(8, io.in2_signed && io.in2(23)), io.in2(23, 16))
mul64.io.in1 := Cat(Fill(8, io.in1_signed && io.in1(7)), io.in1(7,0))
mul64.io.in2 := Cat(Fill(8, io.in2_signed && io.in2(7)), io.in2(7,0))
io.out_data := Cat(mul8(3).io.out_data,
mul16(1).io.out_data(15,0),
mul8(2).io.out_data,
mul32.io.out_data(15,0),
mul8(1).io.out_data,
mul16(0).io.out_data(15,0),
mul8(0).io.out_data,
mul64.io.out_data(15,0))
}
.elsewhen (io.eew === 1.U) {
mul32.io.in1 := Cat(Fill(16, io.in1_signed && io.in1(47)), io.in1(47, 32))
mul32.io.in2 := Cat(Fill(16, io.in2_signed && io.in2(47)), io.in2(47, 32))
mul64.io.in1 := Cat(Fill(16, io.in1_signed && io.in1(15)), io.in1(15,0))
mul64.io.in2 := Cat(Fill(16, io.in2_signed && io.in2(15)), io.in2(15,0))
io.out_data := Cat(mul16(1).io.out_data,
mul32.io.out_data(31,0),
mul16(0).io.out_data,
mul64.io.out_data(31,0))
}
.elsewhen (io.eew === 2.U) {
mul64.io.in1 := Cat(Fill(32, io.in1_signed && io.in1(31)), io.in1(31,0))
mul64.io.in2 := Cat(Fill(32, io.in2_signed && io.in2(31)), io.in2(31,0))
io.out_data := Cat(mul32.io.out_data,
mul64.io.out_data(63,0))
}
.otherwise {
io.out_data := mul64.io.out_data
}
}
class Multiplier(width: Int) extends Module {
val io = IO(new Bundle {
val in1_signed = Input(Bool())
val in2_signed = Input(Bool())
val in1 = Input(UInt(width.W))
val in2 = Input(UInt(width.W))
val out_data = Output(UInt((2*width).W))
})
val lhs = Cat(io.in1_signed && io.in1(width-1), io.in1).asSInt
val rhs = Cat(io.in2_signed && io.in2(width-1), io.in2).asSInt
val prod = lhs * rhs
io.out_data := prod(2*width-1,0)
}
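// Worked example (width = 8, both inputs signed): in1 = 0xFF (-1), in2 = 0x02 (+2).
// Each operand is widened to 9 bits using `signed && msb` as the extra sign bit, the
// signed product is -2, and the low 16 bits returned in out_data are 0xFFFE.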
class VectorSMul(implicit p: Parameters) extends CoreModule()(p) with HasVectorParams {
val io = IO(new Bundle {
val eew = Input(UInt(2.W))
val vxrm = Input(UInt(2.W))
val mul_in = Input(UInt((2*dLen).W))
val clipped = Output(UInt((dLen).W))
val sat = Output(UInt(dLenB.W))
})
val sat_sew = Wire(Vec(4, UInt(dLenB.W)))
val clipped_sew = Wire(Vec(4, UInt(dLen.W)))
for (sew <- 0 until 4) {
val wideProds = io.mul_in.asTypeOf(Vec(dLenB >> sew, SInt((16 << sew).W)))
val smul = wideProds.map { wideElem =>
val rounding_incr = RoundingIncrement(io.vxrm, wideElem((8 << sew)-1, 0))
(wideElem >> ((8 << sew) - 1)) + Cat(0.U(1.W), rounding_incr).asSInt
}
val clip_neg = (-1 << ((8 << sew)-1)).S
val clip_pos = ((1 << ((8 << sew)-1)) - 1).S
val clip_hi = smul.map{ _ > clip_pos }
val clip_lo = smul.map{ _ < clip_neg }
clipped_sew(sew) := smul.zipWithIndex.map { case (sm, i) =>
val long = Mux(clip_hi(i), clip_pos, 0.S) | Mux(clip_lo(i), clip_neg, 0.S) | Mux(!clip_hi(i) && !clip_lo(i), sm, 0.S)
long((8 << sew)-1, 0)
}.asUInt
val sat_vec_sew = smul.zipWithIndex.map { case (sm, i) =>
clip_hi(i) || clip_lo(i)
}
sat_sew(sew) := FillInterleaved((1 << sew), sat_vec_sew)
}
io.clipped := clipped_sew(io.eew)
io.sat := sat_sew(io.eew)
}
| module Multiplier_4( // @[SegmentedMultiplyPipe.scala:219:7]
input io_in1_signed, // @[SegmentedMultiplyPipe.scala:220:14]
input io_in2_signed, // @[SegmentedMultiplyPipe.scala:220:14]
input [7:0] io_in1, // @[SegmentedMultiplyPipe.scala:220:14]
input [7:0] io_in2, // @[SegmentedMultiplyPipe.scala:220:14]
output [15:0] io_out_data // @[SegmentedMultiplyPipe.scala:220:14]
);
assign io_out_data = {{8{io_in1_signed & io_in1[7]}}, io_in1} * {{8{io_in2_signed & io_in2[7]}}, io_in2}; // @[SegmentedMultiplyPipe.scala:219:7, :230:{31,40}, :231:{31,40}, :233:18]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
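// A minimal usage sketch of ShiftRegInit (the example object, module, and signal names below
// are illustrative assumptions, not part of the original file): delay a Bool by two cycles,
// resetting both pipeline registers to false and naming them "pipe_1" and "pipe_0".
object ShiftRegInitExample {
  class DelayTwo extends Module {
    val io = IO(new Bundle {
      val in = Input(Bool())
      val out = Output(Bool())
    })
    io.out := ShiftRegInit(io.in, n = 2, init = false.B, name = Some("pipe"))
  }
}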
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
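// Example parameterizations (derived values follow directly from the definitions above):
// AsyncQueueParams() -> depth = 8, sync = 3, bits = 3, wires = 8 (whole mem crossed)
// AsyncQueueParams(depth = 4, narrow = true) -> bits = 2, wires = 1 (read mux on the source side)
// AsyncQueueParams.singleton() -> depth = 1, bits = 0, wires = 1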
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
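// Worked example of the binary-to-Gray conversion above: a binary count of 5 (0b101) yields
// 0b101 ^ 0b010 = 0b111, and the next count 6 (0b110) yields 0b110 ^ 0b011 = 0b101.
// Consecutive Gray codes differ in exactly one bit, which is what makes the widx/ridx
// pointers safe to synchronize across clock domains.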
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
// Impossible to write because a dequeue can occur on the receiving side,
// after which reset is allowed to happen, but the write side cannot know
// that the dequeue occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
// The register only latches when the selected valued is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that this register latches stale bits when the source domain is reset / has its power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
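// A minimal wiring sketch (the producer/consumer modules and their clocks/resets are assumed
// names, not defined here) showing how the crossing helper above is typically hooked up:
//
// val crossing = Module(new AsyncQueue(UInt(8.W), AsyncQueueParams(depth = 8, sync = 3)))
// crossing.io.enq_clock := producerClock
// crossing.io.enq_reset := producerReset
// crossing.io.enq <> producer.io.out
// crossing.io.deq_clock := consumerClock
// crossing.io.deq_reset := consumerReset
// consumer.io.in <> crossing.io.deq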
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and any unconnected [[Dangle]]s from this module
* and its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
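// A minimal usage sketch (the adapter class name is an assumption): a LazyModule typically
// provides its hardware by instantiating LazyModuleImp as a lazy val, so elaboration is
// deferred to the second (module) phase of Diplomacy:
//
// class MyAdapter(implicit p: Parameters) extends LazyModule {
//   lazy val module = new LazyModuleImp(this) {
//     // node bundle wiring goes here; the `auto` IO is produced by instantiate()
//   }
// }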
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
File Debug.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.devices.debug
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.amba.apb.{APBFanout, APBToTL}
import freechips.rocketchip.devices.debug.systembusaccess.{SBToTL, SystemBusAccessModule}
import freechips.rocketchip.devices.tilelink.{DevNullParams, TLBusBypass, TLError}
import freechips.rocketchip.diplomacy.{AddressSet, BufferParams}
import freechips.rocketchip.resources.{Description, Device, Resource, ResourceBindings, ResourceString, SimpleDevice}
import freechips.rocketchip.interrupts.{IntNexusNode, IntSinkParameters, IntSinkPortParameters, IntSourceParameters, IntSourcePortParameters, IntSyncCrossingSource, IntSyncIdentityNode}
import freechips.rocketchip.regmapper.{RegField, RegFieldAccessType, RegFieldDesc, RegFieldGroup, RegFieldWrType, RegReadFn, RegWriteFn}
import freechips.rocketchip.rocket.{CSRs, Instructions}
import freechips.rocketchip.tile.MaxHartIdBits
import freechips.rocketchip.tilelink.{TLAsyncCrossingSink, TLAsyncCrossingSource, TLBuffer, TLRegisterNode, TLXbar}
import freechips.rocketchip.util.{Annotated, AsyncBundle, AsyncQueueParams, AsyncResetSynchronizerShiftReg, FromAsyncBundle, ParameterizedBundle, ResetSynchronizerShiftReg, ToAsyncBundle}
import freechips.rocketchip.util.SeqBoolBitwiseOps
import freechips.rocketchip.util.SeqToAugmentedSeq
import freechips.rocketchip.util.BooleanToAugmentedBoolean
object DsbBusConsts {
def sbAddrWidth = 12
def sbIdWidth = 10
}
object DsbRegAddrs{
// These are used by the ROM.
def HALTED = 0x100
def GOING = 0x104
def RESUMING = 0x108
def EXCEPTION = 0x10C
def WHERETO = 0x300
// This needs to be aligned for up to lq/sq
// This shows up in HartInfo, and needs to be aligned
// to enable up to LQ/SQ instructions.
def DATA = 0x380
// We want DATA to immediately follow PROGBUF so that we can
// use them interchangeably. Leave another slot if there is an
// implicit ebreak.
def PROGBUF(cfg:DebugModuleParams) = {
val tmp = DATA - (cfg.nProgramBufferWords * 4)
if (cfg.hasImplicitEbreak) (tmp - 4) else tmp
}
// This is unused if hasImplicitEbreak is false, and just points to the end of the PROGBUF.
def IMPEBREAK(cfg: DebugModuleParams) = { DATA - 4 }
// We want abstract to be immediately before PROGBUF
// because we auto-generate 2 (or 5) instructions.
def ABSTRACT(cfg:DebugModuleParams) = PROGBUF(cfg) - (cfg.nAbstractInstructions * 4)
def FLAGS = 0x400
def ROMBASE = 0x800
}
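// Worked example with the default DebugModuleParams (baseAddress = 0, nProgramBufferWords = 16,
// hasImplicitEbreak = false, so nAbstractInstructions = 2):
// PROGBUF = 0x380 - 16*4 = 0x340
// ABSTRACT = 0x340 - 2*4 = 0x338
// IMPEBREAK = 0x380 - 4 = 0x37C (only meaningful when the implicit ebreak exists)
// so the auto-generated abstract instructions sit just below PROGBUF, which sits just below DATA.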
/** Enumerations used both in the hardware
* and in the configuration specification.
*/
object DebugModuleAccessType extends scala.Enumeration {
type DebugModuleAccessType = Value
val Access8Bit, Access16Bit, Access32Bit, Access64Bit, Access128Bit = Value
}
object DebugAbstractCommandError extends scala.Enumeration {
type DebugAbstractCommandError = Value
val Success, ErrBusy, ErrNotSupported, ErrException, ErrHaltResume = Value
}
object DebugAbstractCommandType extends scala.Enumeration {
type DebugAbstractCommandType = Value
val AccessRegister, QuickAccess = Value
}
/** Parameters exposed to the top-level design, set based on
* external requirements, etc.
*
* This object checks that the parameters conform to the
* full specification. The implementation which receives this
* object can perform more checks on what that implementation
* actually supports.
* @param nComponents Number of components to support debugging.
* @param baseAddress Base offset for debugEntry and debugException
* @param nDMIAddrSize Size of the Debug Bus Address
* @param nAbstractDataWords Number of 32-bit words for Abstract Commands
* @param nProgramBufferWords Number of 32-bit words for Program Buffer
* @param hasBusMaster Whether or not a bus master should be included
* @param clockGate Whether or not to use dmactive as the clockgate for debug module
* @param maxSupportedSBAccess Maximum transaction size supported by System Bus Access logic.
* @param supportQuickAccess Whether or not to support the quick access command.
* @param supportHartArray Whether or not to implement the hart array register (if >1 hart).
* @param nHaltGroups Number of halt groups
* @param nExtTriggers Number of external triggers
* @param hasHartResets Feature to reset all the currently selected harts
* @param hasImplicitEbreak There is an additional RO program buffer word containing an ebreak
* @param crossingHasSafeReset Include "safe" logic in Async Crossings so that only one side needs to be reset.
*/
case class DebugModuleParams (
baseAddress : BigInt = BigInt(0),
nDMIAddrSize : Int = 7,
nProgramBufferWords: Int = 16,
nAbstractDataWords : Int = 4,
nScratch : Int = 1,
hasBusMaster : Boolean = false,
clockGate : Boolean = true,
maxSupportedSBAccess : Int = 32,
supportQuickAccess : Boolean = false,
supportHartArray : Boolean = true,
nHaltGroups : Int = 1,
nExtTriggers : Int = 0,
hasHartResets : Boolean = false,
hasImplicitEbreak : Boolean = false,
hasAuthentication : Boolean = false,
crossingHasSafeReset : Boolean = true
) {
require ((nDMIAddrSize >= 7) && (nDMIAddrSize <= 32), s"Legal DMIAddrSize is 7-32, not ${nDMIAddrSize}")
require ((nAbstractDataWords > 0) && (nAbstractDataWords <= 16), s"Legal nAbstractDataWords is 1-16, not ${nAbstractDataWords}")
require ((nProgramBufferWords >= 0) && (nProgramBufferWords <= 16), s"Legal nProgramBufferWords is 0-16, not ${nProgramBufferWords}")
require (nHaltGroups < 32, s"Legal nHaltGroups is 0-31, not ${nHaltGroups}")
require (nExtTriggers <= 16, s"Legal nExtTriggers is 0-16, not ${nExtTriggers}")
if (supportQuickAccess) {
// TODO: Check that quick access requirements are met.
}
def address = AddressSet(baseAddress, 0xFFF)
/** whether the DM base address is zero */
def atzero = (baseAddress == 0)
/** The number of generated instructions
*
* When the base address is not zero, we need more instructions (and
* more dscratch registers) to load/store the memory-mapped data registers
* because they may no longer be directly addressable with x0 + a 12-bit imm
*/
def nAbstractInstructions = if (atzero) 2 else 5
def debugEntry: BigInt = baseAddress + 0x800
def debugException: BigInt = baseAddress + 0x808
def nDscratch: Int = if (atzero) 1 else 2
}
object DefaultDebugModuleParams {
def apply(xlen:Int /*TODO , val configStringAddr: Int*/): DebugModuleParams = {
new DebugModuleParams().copy(
nAbstractDataWords = (if (xlen == 32) 1 else if (xlen == 64) 2 else 4),
maxSupportedSBAccess = xlen
)
}
}
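// For example, DefaultDebugModuleParams(64) keeps the defaults above but selects
// nAbstractDataWords = 2 and maxSupportedSBAccess = 64.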
case object DebugModuleKey extends Field[Option[DebugModuleParams]](Some(DebugModuleParams()))
/** Functional parameters exposed to the design configuration.
*
* hartIdToHartSel: For systems where hart ids are not 1:1 with hartsel, provide the mapping.
* hartSelToHartId: Provide inverse mapping of the above
*/
case class DebugModuleHartSelFuncs (
hartIdToHartSel : (UInt) => UInt = (x:UInt) => x,
hartSelToHartId : (UInt) => UInt = (x:UInt) => x
)
case object DebugModuleHartSelKey extends Field(DebugModuleHartSelFuncs())
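// Hypothetical example: if hart IDs were spaced by 4 (0, 4, 8, ...), a configuration
// could override this key with
//   DebugModuleHartSelFuncs(hartIdToHartSel = (x: UInt) => x >> 2,
//                           hartSelToHartId = (x: UInt) => x << 2)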
class DebugExtTriggerOut (val nExtTriggers: Int) extends Bundle {
val req = Output(UInt(nExtTriggers.W))
val ack = Input(UInt(nExtTriggers.W))
}
class DebugExtTriggerIn (val nExtTriggers: Int) extends Bundle {
val req = Input(UInt(nExtTriggers.W))
val ack = Output(UInt(nExtTriggers.W))
}
class DebugExtTriggerIO () (implicit val p: Parameters) extends ParameterizedBundle()(p) {
val out = new DebugExtTriggerOut(p(DebugModuleKey).get.nExtTriggers)
val in = new DebugExtTriggerIn (p(DebugModuleKey).get.nExtTriggers)
}
class DebugAuthenticationIO () (implicit val p: Parameters) extends ParameterizedBundle()(p) {
val dmactive = Output(Bool())
val dmAuthWrite = Output(Bool())
val dmAuthRead = Output(Bool())
val dmAuthWdata = Output(UInt(32.W))
val dmAuthBusy = Input(Bool())
val dmAuthRdata = Input(UInt(32.W))
val dmAuthenticated = Input(Bool())
}
// *****************************************
// Module Interfaces
//
// *****************************************
/** Control signals for Inner, generated in Outer
* {{{
* run control: resumereq, ackhavereset, halt-on-reset mask
* hart select: hasel, hartsel and the hart array mask
* }}}
*/
class DebugInternalBundle (val nComponents: Int)(implicit val p: Parameters) extends ParameterizedBundle()(p) {
/** resume request */
val resumereq = Bool()
/** hart select */
val hartsel = UInt(10.W)
/** reset acknowledge */
val ackhavereset = Bool()
/** hart array enable */
val hasel = Bool()
/** hart array mask */
val hamask = Vec(nComponents, Bool())
/** halt-on-reset mask */
val hrmask = Vec(nComponents, Bool())
}
/** structure for top-level Debug Module signals which aren't the bus interfaces. */
class DebugCtrlBundle (nComponents: Int)(implicit val p: Parameters) extends ParameterizedBundle()(p) {
/** debug availability status for all harts */
val debugUnavail = Input(Vec(nComponents, Bool()))
/** reset signal
*
* for every part of the hardware platform,
* including every hart, except for the DM and any
* logic required to access the DM
*/
val ndreset = Output(Bool())
/** reset signal for the DM itself */
val dmactive = Output(Bool())
/** dmactive acknowledge */
val dmactiveAck = Input(Bool())
}
// *****************************************
// Debug Module
//
// *****************************************
/** Parameterized version of the Debug Module defined in the
* RISC-V Debug Specification
*
* DebugModule is a slave to two asynchronous masters:
* The Debug Bus (DMI) -- This is driven by an external debugger
*
* The System Bus -- This services requests from the cores. Generally
* this interface should only be active at the request
* of the debugger, but the Debug Module may also
* provide the default MTVEC since it is mapped
* to address 0x0.
*
* DebugModule is responsible for control registers and RAM, and
* Debug ROM. It runs partially off of the dmiClk (e.g. TCK) and
* the TL clock. Therefore, it is divided into "Outer" portion (running
* off dmiClock and dmiReset) and "Inner" (running off tl_clock and tl_reset).
* This allows DMCONTROL.haltreq, hartsel, hasel, hawindowsel, hawindow, dmactive,
* and ndreset to be modified even while the Core is in reset or not being clocked.
* Not all reads from the Debugger to the Debug Module will actually complete
* in these scenarios either; they will just block until tl_clock and tl_reset
* allow them to complete. This is not strictly necessary for
* proper debugger functionality.
*/
// Local reg mapper function : Notify when written, but give the value as well.
object WNotifyWire {
def apply(n: Int, value: UInt, set: Bool, name: String, desc: String) : RegField = {
RegField(n, 0.U, RegWriteFn((valid, data) => {
set := valid
value := data
true.B
}), Some(RegFieldDesc(name = name, desc = desc,
access = RegFieldAccessType.W)))
}
}
// Local reg mapper function : Notify when accessed either as read or write.
object RWNotify {
def apply (n: Int, rVal: UInt, wVal: UInt, rNotify: Bool, wNotify: Bool, desc: Option[RegFieldDesc] = None): RegField = {
RegField(n,
RegReadFn ((ready) => {rNotify := ready ; (true.B, rVal)}),
RegWriteFn((valid, data) => {
wNotify := valid
when (valid) {wVal := data}
true.B
}
), desc)
}
}
// Local reg mapper function : Notify with value when written, take read input as presented.
// This allows checking or correcting the write value before storing it in the register field.
object WNotifyVal {
def apply(n: Int, rVal: UInt, wVal: UInt, wNotify: Bool, desc: RegFieldDesc): RegField = {
RegField(n, rVal, RegWriteFn((valid, data) => {
wNotify := valid
wVal := data
true.B
}
), desc)
}
}
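// Sketch of how these helpers are used (see dmControlRegFields below for real
// instances): expose a writable field whose write data and write strobe are
// captured into side-channel wires, e.g.
//   WNotifyVal(1, rdVal, wrDataWire, wrEnWire, RegFieldDesc("field", "description"))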
class TLDebugModuleOuter(device: Device)(implicit p: Parameters) extends LazyModule {
// For Shorter Register Names
import DMI_RegAddrs._
val cfg = p(DebugModuleKey).get
val intnode = IntNexusNode(
sourceFn = { _ => IntSourcePortParameters(Seq(IntSourceParameters(1, Seq(Resource(device, "int"))))) },
sinkFn = { _ => IntSinkPortParameters(Seq(IntSinkParameters())) },
outputRequiresInput = false)
val dmiNode = TLRegisterNode (
address = AddressSet.misaligned(DMI_DMCONTROL << 2, 4) ++
AddressSet.misaligned(DMI_HARTINFO << 2, 4) ++
AddressSet.misaligned(DMI_HAWINDOWSEL << 2, 4) ++
AddressSet.misaligned(DMI_HAWINDOW << 2, 4),
device = device,
beatBytes = 4,
executable = false
)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
require (intnode.edges.in.size == 0, "Debug Module does not accept interrupts")
val nComponents = intnode.out.size
def getNComponents = () => nComponents
val supportHartArray = cfg.supportHartArray && (nComponents > 1) // no hart array if only one hart
val io = IO(new Bundle {
/** structure for top-level Debug Module signals which aren't the bus interfaces. */
val ctrl = (new DebugCtrlBundle(nComponents))
/** control signals for Inner, generated in Outer */
val innerCtrl = new DecoupledIO(new DebugInternalBundle(nComponents))
/** debug interruption from Inner to Outer
*
* contains 2 types of debug interruption causes:
* - halt group
* - halt-on-reset
*/
val hgDebugInt = Input(Vec(nComponents, Bool()))
/** hart reset request to core */
val hartResetReq = cfg.hasHartResets.option(Output(Vec(nComponents, Bool())))
/** authentication support */
val dmAuthenticated = cfg.hasAuthentication.option(Input(Bool()))
})
val omRegMap = withReset(reset.asAsyncReset) {
// FIXME: Instead of casting reset to ensure it is Async, assert/require reset.Type == AsyncReset (when this feature is available)
val dmAuthenticated = io.dmAuthenticated.map( dma =>
ResetSynchronizerShiftReg(in=dma, sync=3, name=Some("dmAuthenticated_sync"))).getOrElse(true.B)
//----DMCONTROL (The whole point of 'Outer' is to maintain this register on dmiClock (e.g. TCK) domain, so that it
// can be written even if 'Inner' is not being clocked or is in reset. This allows halting
// harts while the rest of the system is in reset. It doesn't really allow any other
// register accesses, which will keep returning 'busy' to the debugger interface.
val DMCONTROLReset = WireInit(0.U.asTypeOf(new DMCONTROLFields()))
val DMCONTROLNxt = WireInit(0.U.asTypeOf(new DMCONTROLFields()))
val DMCONTROLReg = RegNext(next=DMCONTROLNxt, init=0.U.asTypeOf(DMCONTROLNxt)).suggestName("DMCONTROLReg")
val hartsel_mask = if (nComponents > 1) ((1 << p(MaxHartIdBits)) - 1).U else 0.U
val DMCONTROLWrData = WireInit(0.U.asTypeOf(new DMCONTROLFields()))
val dmactiveWrEn = WireInit(false.B)
val ndmresetWrEn = WireInit(false.B)
val clrresethaltreqWrEn = WireInit(false.B)
val setresethaltreqWrEn = WireInit(false.B)
val hartselloWrEn = WireInit(false.B)
val haselWrEn = WireInit(false.B)
val ackhaveresetWrEn = WireInit(false.B)
val hartresetWrEn = WireInit(false.B)
val resumereqWrEn = WireInit(false.B)
val haltreqWrEn = WireInit(false.B)
val dmactive = DMCONTROLReg.dmactive
DMCONTROLNxt := DMCONTROLReg
when (~dmactive) {
DMCONTROLNxt := DMCONTROLReset
} .otherwise {
when (dmAuthenticated && ndmresetWrEn) { DMCONTROLNxt.ndmreset := DMCONTROLWrData.ndmreset }
when (dmAuthenticated && hartselloWrEn) { DMCONTROLNxt.hartsello := DMCONTROLWrData.hartsello & hartsel_mask}
when (dmAuthenticated && haselWrEn) { DMCONTROLNxt.hasel := DMCONTROLWrData.hasel }
when (dmAuthenticated && hartresetWrEn) { DMCONTROLNxt.hartreset := DMCONTROLWrData.hartreset }
when (dmAuthenticated && haltreqWrEn) { DMCONTROLNxt.haltreq := DMCONTROLWrData.haltreq }
}
// Put this last to override its own effects.
when (dmactiveWrEn) {
DMCONTROLNxt.dmactive := DMCONTROLWrData.dmactive
}
//----HARTINFO
// DATA registers are mapped to memory. The dataaddr field of HARTINFO has only
// 12 bits and assumes the DM base is 0. If not at 0, then HARTINFO reads as 0
// (implying nonexistence according to the Debug Spec).
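// With the default configuration at base address 0, HARTINFO therefore reads back
// dataaddr = 0x380, datasize = 4, dataaccess = 1 (memory-mapped) and nscratch = 1.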
val HARTINFORdData = WireInit(0.U.asTypeOf(new HARTINFOFields()))
if (cfg.atzero) when (dmAuthenticated) {
HARTINFORdData.dataaccess := true.B
HARTINFORdData.datasize := cfg.nAbstractDataWords.U
HARTINFORdData.dataaddr := DsbRegAddrs.DATA.U
HARTINFORdData.nscratch := cfg.nScratch.U
}
//--------------------------------------------------------------
// Hart array mask and window
// hamask is the hart array mask (1 bit per component), which doesn't include the hart selected by dmcontrol.hartsello
// HAWINDOWSEL selects a 32-bit slice of HAMASK to be visible for read/write in HAWINDOW
//--------------------------------------------------------------
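// Worked example: with 40 harts there are two HAMASK slices; HAWINDOWSEL = 0 exposes
// harts 0-31 in HAWINDOW and HAWINDOWSEL = 1 exposes harts 32-39 (the unused upper
// bits of the last slice read as 0).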
val hamask = WireInit(VecInit(Seq.fill(nComponents) {false.B} ))
def haWindowSize = 32
// The following need to be declared even if supportHartArray is false, because
// dmiNode.regmap references them at elaboration time
val HAWINDOWSELWrData = WireInit(0.U.asTypeOf(new HAWINDOWSELFields()))
val HAWINDOWSELWrEn = WireInit(false.B)
val HAWINDOWRdData = WireInit(0.U.asTypeOf(new HAWINDOWFields()))
val HAWINDOWWrData = WireInit(0.U.asTypeOf(new HAWINDOWFields()))
val HAWINDOWWrEn = WireInit(false.B)
/** whether the hart is selected */
def hartSelected(hart: Int): Bool = {
((io.innerCtrl.bits.hartsel === hart.U) ||
(if (supportHartArray) io.innerCtrl.bits.hasel && io.innerCtrl.bits.hamask(hart) else false.B))
}
val HAWINDOWSELNxt = WireInit(0.U.asTypeOf(new HAWINDOWSELFields()))
val HAWINDOWSELReg = RegNext(next=HAWINDOWSELNxt, init=0.U.asTypeOf(HAWINDOWSELNxt))
if (supportHartArray) {
val HAWINDOWSELReset = WireInit(0.U.asTypeOf(new HAWINDOWSELFields()))
HAWINDOWSELNxt := HAWINDOWSELReg
when (~dmactive || ~dmAuthenticated) {
HAWINDOWSELNxt := HAWINDOWSELReset
} .otherwise {
when (HAWINDOWSELWrEn) {
// Unneeded upper bits of HAWINDOWSEL are tied to 0. Entire register is 0 if all harts fit in one window
if (nComponents > haWindowSize) {
HAWINDOWSELNxt.hawindowsel := HAWINDOWSELWrData.hawindowsel & ((1 << (log2Up(nComponents) - 5)) - 1).U
} else {
HAWINDOWSELNxt.hawindowsel := 0.U
}
}
}
val numHAMASKSlices = ((nComponents - 1)/haWindowSize)+1
HAWINDOWRdData.maskdata := 0.U // default, overridden below
// For each slice, use a HAMASKReg to store the selection info
for (ii <- 0 until numHAMASKSlices) {
val sliceMask = if (nComponents > ((ii*haWindowSize) + haWindowSize-1)) (BigInt(1) << haWindowSize) - 1 // All harts in this slice exist
else (BigInt(1)<<(nComponents - (ii*haWindowSize))) - 1 // Partial last slice
val HAMASKRst = WireInit(0.U.asTypeOf(new HAWINDOWFields()))
val HAMASKNxt = WireInit(0.U.asTypeOf(new HAWINDOWFields()))
val HAMASKReg = RegNext(next=HAMASKNxt, init=0.U.asTypeOf(HAMASKNxt))
when (ii.U === HAWINDOWSELReg.hawindowsel) {
HAWINDOWRdData.maskdata := HAMASKReg.asUInt & sliceMask.U
}
HAMASKNxt.maskdata := HAMASKReg.asUInt
when (~dmactive || ~dmAuthenticated) {
HAMASKNxt := HAMASKRst
}.otherwise {
when (HAWINDOWWrEn && (ii.U === HAWINDOWSELReg.hawindowsel)) {
HAMASKNxt.maskdata := HAWINDOWWrData.maskdata
}
}
// drive each slice of hamask with stored HAMASKReg or with new value being written
for (jj <- 0 until haWindowSize) {
if (((ii*haWindowSize) + jj) < nComponents) {
val tempWrData = HAWINDOWWrData.maskdata.asBools
val tempMaskReg = HAMASKReg.asUInt.asBools
when (HAWINDOWWrEn && (ii.U === HAWINDOWSELReg.hawindowsel)) {
hamask(ii*haWindowSize + jj) := tempWrData(jj)
}.otherwise {
hamask(ii*haWindowSize + jj) := tempMaskReg(jj)
}
}
}
}
}
//--------------------------------------------------------------
// Halt-on-reset
// hrmaskReg is current set of harts that should halt-on-reset
// Reset state (dmactive=0) is all zeroes
// Bits are set by writing 1 to DMCONTROL.setresethaltreq
// Bits are cleared by writing 1 to DMCONTROL.clrresethaltreq
// Spec says if both are 1, then clrresethaltreq is executed
// hrmask is the halt-on-reset mask which will be sent to inner
//--------------------------------------------------------------
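// Example: the debugger selects hart 2 and writes DMCONTROL.setresethaltreq = 1;
// hrmask(2) is then set here and forwarded to Inner, where a later reset of hart 2
// raises a debug interrupt that is held until the hart reaches the halted state.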
val hrmask = Wire(Vec(nComponents, Bool()))
val hrmaskNxt = Wire(Vec(nComponents, Bool()))
val hrmaskReg = RegNext(next=hrmaskNxt, init=0.U.asTypeOf(hrmaskNxt)).suggestName("hrmaskReg")
hrmaskNxt := hrmaskReg
for (component <- 0 until nComponents) {
when (~dmactive || ~dmAuthenticated) {
hrmaskNxt(component) := false.B
}.elsewhen (clrresethaltreqWrEn && DMCONTROLWrData.clrresethaltreq && hartSelected(component)) {
hrmaskNxt(component) := false.B
}.elsewhen (setresethaltreqWrEn && DMCONTROLWrData.setresethaltreq && hartSelected(component)) {
hrmaskNxt(component) := true.B
}
}
hrmask := hrmaskNxt
val dmControlRegFields = RegFieldGroup("dmcontrol", Some("debug module control register"), Seq(
WNotifyVal(1, DMCONTROLReg.dmactive & io.ctrl.dmactiveAck, DMCONTROLWrData.dmactive, dmactiveWrEn,
RegFieldDesc("dmactive", "debug module active", reset=Some(0))),
WNotifyVal(1, DMCONTROLReg.ndmreset, DMCONTROLWrData.ndmreset, ndmresetWrEn,
RegFieldDesc("ndmreset", "debug module reset output", reset=Some(0))),
WNotifyVal(1, 0.U, DMCONTROLWrData.clrresethaltreq, clrresethaltreqWrEn,
RegFieldDesc("clrresethaltreq", "clear reset halt request", reset=Some(0), access=RegFieldAccessType.W)),
WNotifyVal(1, 0.U, DMCONTROLWrData.setresethaltreq, setresethaltreqWrEn,
RegFieldDesc("setresethaltreq", "set reset halt request", reset=Some(0), access=RegFieldAccessType.W)),
RegField(12),
if (nComponents > 1) WNotifyVal(p(MaxHartIdBits),
DMCONTROLReg.hartsello, DMCONTROLWrData.hartsello, hartselloWrEn,
RegFieldDesc("hartsello", "hart select low", reset=Some(0)))
else RegField(1),
if (nComponents > 1) RegField(10-p(MaxHartIdBits))
else RegField(9),
if (supportHartArray)
WNotifyVal(1, DMCONTROLReg.hasel, DMCONTROLWrData.hasel, haselWrEn,
RegFieldDesc("hasel", "hart array select", reset=Some(0)))
else RegField(1),
RegField(1),
WNotifyVal(1, 0.U, DMCONTROLWrData.ackhavereset, ackhaveresetWrEn,
RegFieldDesc("ackhavereset", "acknowledge reset", reset=Some(0), access=RegFieldAccessType.W)),
if (cfg.hasHartResets)
WNotifyVal(1, DMCONTROLReg.hartreset, DMCONTROLWrData.hartreset, hartresetWrEn,
RegFieldDesc("hartreset", "hart reset request", reset=Some(0)))
else RegField(1),
WNotifyVal(1, 0.U, DMCONTROLWrData.resumereq, resumereqWrEn,
RegFieldDesc("resumereq", "resume request", reset=Some(0), access=RegFieldAccessType.W)),
WNotifyVal(1, DMCONTROLReg.haltreq, DMCONTROLWrData.haltreq, haltreqWrEn, // Spec says W, but maintaining previous behavior
RegFieldDesc("haltreq", "halt request", reset=Some(0)))
))
val hartinfoRegFields = RegFieldGroup("dmi_hartinfo", Some("hart information"), Seq(
RegField.r(12, HARTINFORdData.dataaddr, RegFieldDesc("dataaddr", "data address", reset=Some(if (cfg.atzero) DsbRegAddrs.DATA else 0))),
RegField.r(4, HARTINFORdData.datasize, RegFieldDesc("datasize", "number of DATA registers", reset=Some(if (cfg.atzero) cfg.nAbstractDataWords else 0))),
RegField.r(1, HARTINFORdData.dataaccess, RegFieldDesc("dataaccess", "data access type", reset=Some(if (cfg.atzero) 1 else 0))),
RegField(3),
RegField.r(4, HARTINFORdData.nscratch, RegFieldDesc("nscratch", "number of scratch registers", reset=Some(if (cfg.atzero) cfg.nScratch else 0)))
))
//--------------------------------------------------------------
// DMI register decoder for Outer
//--------------------------------------------------------------
// regmap addresses are byte offsets from lowest address
def DMI_DMCONTROL_OFFSET = 0
def DMI_HARTINFO_OFFSET = ((DMI_HARTINFO - DMI_DMCONTROL) << 2)
def DMI_HAWINDOWSEL_OFFSET = ((DMI_HAWINDOWSEL - DMI_DMCONTROL) << 2)
def DMI_HAWINDOW_OFFSET = ((DMI_HAWINDOW - DMI_DMCONTROL) << 2)
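// For reference (DMI addresses from the 0.13 debug spec: DMCONTROL = 0x10,
// HARTINFO = 0x12, HAWINDOWSEL = 0x14, HAWINDOW = 0x15), these byte offsets work
// out to 0, 8, 16 and 20 respectively.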
val omRegMap = dmiNode.regmap(
DMI_DMCONTROL_OFFSET -> dmControlRegFields,
DMI_HARTINFO_OFFSET -> hartinfoRegFields,
DMI_HAWINDOWSEL_OFFSET -> (if (supportHartArray && (nComponents > 32)) Seq(
WNotifyVal(log2Up(nComponents)-5, HAWINDOWSELReg.hawindowsel, HAWINDOWSELWrData.hawindowsel, HAWINDOWSELWrEn,
RegFieldDesc("hawindowsel", "hart array window select", reset=Some(0)))) else Nil),
DMI_HAWINDOW_OFFSET -> (if (supportHartArray) Seq(
WNotifyVal(if (nComponents > 31) 32 else nComponents, HAWINDOWRdData.maskdata, HAWINDOWWrData.maskdata, HAWINDOWWrEn,
RegFieldDesc("hawindow", "hart array window", reset=Some(0), volatile=(nComponents > 32)))) else Nil)
)
//--------------------------------------------------------------
// Interrupt Registers
//--------------------------------------------------------------
val debugIntNxt = WireInit(VecInit(Seq.fill(nComponents) {false.B} ))
val debugIntRegs = RegNext(next=debugIntNxt, init=0.U.asTypeOf(debugIntNxt)).suggestName("debugIntRegs")
debugIntNxt := debugIntRegs
val (intnode_out, _) = intnode.out.unzip
for (component <- 0 until nComponents) {
intnode_out(component)(0) := debugIntRegs(component) | io.hgDebugInt(component)
}
// send a debug interrupt to the core when DMCONTROL.haltreq is set for it
for (component <- 0 until nComponents) {
when (~dmactive || ~dmAuthenticated) {
debugIntNxt(component) := false.B
}.otherwise {
when (haltreqWrEn && ((DMCONTROLWrData.hartsello === component.U)
|| (if (supportHartArray) DMCONTROLWrData.hasel && hamask(component) else false.B))) {
debugIntNxt(component) := DMCONTROLWrData.haltreq
}
}
}
// Halt request registers are set & cleared by writes to DMCONTROL.haltreq
// resumereq also causes the core to execute a 'dret',
// so resumereq is passed through to Inner.
// hartsel/hasel/hamask must also be used by the DebugModule state machine,
// so it is passed to Inner.
// These registers ensure that requests to dmInner are not lost if inner clock isn't running or requests occur too close together.
// If the innerCtrl async queue is not ready, the notification will be posted and held until ready is received.
// Additional notifications that occur while one is already waiting update the pending data so that the last value written is sent.
// Volatile events resumereq and ackhavereset are registered when they occur and remain pending until ready is received.
val innerCtrlValid = Wire(Bool())
val innerCtrlValidReg = RegInit(false.B).suggestName("innerCtrlValidReg")
val innerCtrlResumeReqReg = RegInit(false.B).suggestName("innerCtrlResumeReqReg")
val innerCtrlAckHaveResetReg = RegInit(false.B).suggestName("innerCtrlAckHaveResetReg")
innerCtrlValid := hartselloWrEn | resumereqWrEn | ackhaveresetWrEn | setresethaltreqWrEn | clrresethaltreqWrEn | haselWrEn |
(HAWINDOWWrEn & supportHartArray.B)
innerCtrlValidReg := io.innerCtrl.valid & ~io.innerCtrl.ready // Hold innerctrl request until the async queue accepts it
innerCtrlResumeReqReg := io.innerCtrl.bits.resumereq & ~io.innerCtrl.ready // Hold resumereq until accepted
innerCtrlAckHaveResetReg := io.innerCtrl.bits.ackhavereset & ~io.innerCtrl.ready // Hold ackhavereset until accepted
io.innerCtrl.valid := innerCtrlValid | innerCtrlValidReg
io.innerCtrl.bits.hartsel := Mux(hartselloWrEn, DMCONTROLWrData.hartsello, DMCONTROLReg.hartsello)
io.innerCtrl.bits.resumereq := (resumereqWrEn & DMCONTROLWrData.resumereq) | innerCtrlResumeReqReg
io.innerCtrl.bits.ackhavereset := (ackhaveresetWrEn & DMCONTROLWrData.ackhavereset) | innerCtrlAckHaveResetReg
io.innerCtrl.bits.hrmask := hrmask
if (supportHartArray) {
io.innerCtrl.bits.hasel := Mux(haselWrEn, DMCONTROLWrData.hasel, DMCONTROLReg.hasel)
io.innerCtrl.bits.hamask := hamask
} else {
io.innerCtrl.bits.hasel := DontCare
io.innerCtrl.bits.hamask := DontCare
}
io.ctrl.ndreset := DMCONTROLReg.ndmreset
io.ctrl.dmactive := DMCONTROLReg.dmactive
// hart reset mechanism implementation
if (cfg.hasHartResets) {
val hartResetNxt = Wire(Vec(nComponents, Bool()))
val hartResetReg = RegNext(next=hartResetNxt, init=0.U.asTypeOf(hartResetNxt))
for (component <- 0 until nComponents) {
hartResetNxt(component) := DMCONTROLReg.hartreset & hartSelected(component)
io.hartResetReq.get(component) := hartResetReg(component)
}
}
omRegMap // FIXME: Remove this when withReset is removed
}}
}
// wrap a Outer with a DMIToTL, derived by dmi clock & reset
class TLDebugModuleOuterAsync(device: Device)(implicit p: Parameters) extends LazyModule {
val cfg = p(DebugModuleKey).get
val dmiXbar = LazyModule (new TLXbar(nameSuffix = Some("dmixbar")))
val dmi2tlOpt = (!p(ExportDebug).apb).option({
val dmi2tl = LazyModule(new DMIToTL())
dmiXbar.node := dmi2tl.node
dmi2tl
})
val apbNodeOpt = p(ExportDebug).apb.option({
val apb2tl = LazyModule(new APBToTL())
val apb2tlBuffer = LazyModule(new TLBuffer(BufferParams.pipe))
val dmTopAddr = (1 << cfg.nDMIAddrSize) << 2
val tlErrorParams = DevNullParams(AddressSet.misaligned(dmTopAddr, APBDebugConsts.apbDebugRegBase-dmTopAddr), maxAtomic=0, maxTransfer=4)
val tlError = LazyModule(new TLError(tlErrorParams, buffer=false))
val apbXbar = LazyModule(new APBFanout())
val apbRegs = LazyModule(new APBDebugRegisters())
apbRegs.node := apbXbar.node
apb2tl.node := apbXbar.node
apb2tlBuffer.node := apb2tl.node
dmiXbar.node := apb2tlBuffer.node
tlError.node := dmiXbar.node
apbXbar.node
})
val dmOuter = LazyModule( new TLDebugModuleOuter(device))
val intnode = IntSyncIdentityNode()
intnode :*= IntSyncCrossingSource(alreadyRegistered = true) :*= dmOuter.intnode
val dmiBypass = LazyModule(new TLBusBypass(beatBytes=4, bufferError=false, maxAtomic=0, maxTransfer=4))
val dmiInnerNode = TLAsyncCrossingSource() := dmiBypass.node := dmiXbar.node
dmOuter.dmiNode := dmiXbar.node
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val nComponents = dmOuter.intnode.edges.out.size
val io = IO(new Bundle {
val dmi_clock = Input(Clock())
val dmi_reset = Input(Reset())
/** Debug Module Interface between DM and DTM
*
* The DTM provides access to one or more Debug Modules (DMs) using DMI
*/
val dmi = (!p(ExportDebug).apb).option(Flipped(new DMIIO()(p)))
// Optional APB Interface is fully diplomatic so is not listed here.
val ctrl = new DebugCtrlBundle(nComponents)
/** control signals for Inner, generated in Outer */
val innerCtrl = new AsyncBundle(new DebugInternalBundle(nComponents), AsyncQueueParams.singleton(safe=cfg.crossingHasSafeReset))
/** debug interruption generated in Inner */
val hgDebugInt = Input(Vec(nComponents, Bool()))
/** hart reset request to core */
val hartResetReq = p(DebugModuleKey).get.hasHartResets.option(Output(Vec(nComponents, Bool())))
/** Authentication signal from core */
val dmAuthenticated = p(DebugModuleKey).get.hasAuthentication.option(Input(Bool()))
})
val rf_reset = IO(Input(Reset())) // RF transform
childClock := io.dmi_clock
childReset := io.dmi_reset
override def provideImplicitClockToLazyChildren = true
withClockAndReset(childClock, childReset) {
dmi2tlOpt.foreach { _.module.io.dmi <> io.dmi.get }
val dmactiveAck = AsyncResetSynchronizerShiftReg(in=io.ctrl.dmactiveAck, sync=3, name=Some("dmactiveAckSync"))
dmiBypass.module.io.bypass := ~io.ctrl.dmactive | ~dmactiveAck
io.ctrl <> dmOuter.module.io.ctrl
dmOuter.module.io.ctrl.dmactiveAck := dmactiveAck // send synced version down to dmOuter
io.innerCtrl <> ToAsyncBundle(dmOuter.module.io.innerCtrl, AsyncQueueParams.singleton(safe=cfg.crossingHasSafeReset))
dmOuter.module.io.hgDebugInt := io.hgDebugInt
io.hartResetReq.foreach { x => dmOuter.module.io.hartResetReq.foreach {y => x := y}}
io.dmAuthenticated.foreach { x => dmOuter.module.io.dmAuthenticated.foreach { y => y := x}}
}
}
}
class TLDebugModuleInner(device: Device, getNComponents: () => Int, beatBytes: Int)(implicit p: Parameters) extends LazyModule
{
// For Shorter Register Names
import DMI_RegAddrs._
val cfg = p(DebugModuleKey).get
def getCfg = () => cfg
val dmTopAddr = (1 << cfg.nDMIAddrSize) << 2
/** dmiNode address set */
val dmiNode = TLRegisterNode(
// Address is range 0 to 0x1FF except DMCONTROL, HARTINFO, HAWINDOWSEL, HAWINDOW which are handled by Outer
address = AddressSet.misaligned(0, DMI_DMCONTROL << 2) ++
AddressSet.misaligned((DMI_DMCONTROL + 1) << 2, ((DMI_HARTINFO << 2) - ((DMI_DMCONTROL + 1) << 2))) ++
AddressSet.misaligned((DMI_HARTINFO + 1) << 2, ((DMI_HAWINDOWSEL << 2) - ((DMI_HARTINFO + 1) << 2))) ++
AddressSet.misaligned((DMI_HAWINDOW + 1) << 2, (dmTopAddr - ((DMI_HAWINDOW + 1) << 2))),
device = device,
beatBytes = 4,
executable = false
)
val tlNode = TLRegisterNode(
address=Seq(cfg.address),
device=device,
beatBytes=beatBytes,
executable=true
)
val sb2tlOpt = cfg.hasBusMaster.option(LazyModule(new SBToTL()))
// If we want to support custom registers read through Abstract Commands,
// provide a place to bring them into the debug module. What this connects
// to is up to the implementation.
val customNode = new DebugCustomSink()
lazy val module = new Impl
class Impl extends LazyModuleImp(this){
val nComponents = getNComponents()
Annotated.params(this, cfg)
val supportHartArray = cfg.supportHartArray & (nComponents > 1)
val nExtTriggers = cfg.nExtTriggers
val nHaltGroups = if ((nComponents > 1) | (nExtTriggers > 0)) cfg.nHaltGroups
else 0 // no halt groups possible if single hart with no external triggers
val hartSelFuncs = if (getNComponents() > 1) p(DebugModuleHartSelKey) else DebugModuleHartSelFuncs(
hartIdToHartSel = (x) => 0.U,
hartSelToHartId = (x) => x
)
val io = IO(new Bundle {
/** dm reset signal passed in from Outer */
val dmactive = Input(Bool())
/** control signals for Inner
*
* it's generated by Outer and comes in
*/
val innerCtrl = Flipped(new DecoupledIO(new DebugInternalBundle(nComponents)))
/** debug unavail signal passed in from Outer*/
val debugUnavail = Input(Vec(nComponents, Bool()))
/** debug interruption from Inner to Outer
*
* contains 2 types of debug interruption causes:
* - halt group
* - halt-on-reset
*/
val hgDebugInt = Output(Vec(nComponents, Bool()))
/** interface for trigger */
val extTrigger = (nExtTriggers > 0).option(new DebugExtTriggerIO())
/** vector to indicate which hart is in reset
*
* dm receives it from core and sends it to Inner
*/
val hartIsInReset = Input(Vec(nComponents, Bool()))
val tl_clock = Input(Clock())
val tl_reset = Input(Reset())
/** Debug Authentication signals from core */
val auth = cfg.hasAuthentication.option(new DebugAuthenticationIO())
})
sb2tlOpt.map { sb =>
sb.module.clock := io.tl_clock
sb.module.reset := io.tl_reset
sb.module.rf_reset := io.tl_reset
}
//--------------------------------------------------------------
// Import constants for shorter variable names
//--------------------------------------------------------------
import DMI_RegAddrs._
import DsbRegAddrs._
import DsbBusConsts._
//--------------------------------------------------------------
// Sanity Check Configuration For this implementation.
//--------------------------------------------------------------
require (cfg.supportQuickAccess == false, "No Quick Access support yet")
require ((nHaltGroups > 0) || (nExtTriggers == 0), "External triggers require at least 1 halt group")
//--------------------------------------------------------------
// Register & Wire Declarations (which need to be pre-declared)
//--------------------------------------------------------------
// run control regs: tracking all the harts
// implementation: see the 'implementation-specific bits' section below
/** all harts halted status */
val haltedBitRegs = Reg(UInt(nComponents.W))
/** all harts resume request status */
val resumeReqRegs = Reg(UInt(nComponents.W))
/** all harts have reset status */
val haveResetBitRegs = Reg(UInt(nComponents.W))
// default is 1; while a resume request is pending, the corresponding resumeAck is 0
/** all harts resume ack status */
val resumeAcks = Wire(UInt(nComponents.W))
// --- regmapper outputs
// hart state Id and En
// in Hart Bus Access ROM
val hartHaltedWrEn = Wire(Bool())
val hartHaltedId = Wire(UInt(sbIdWidth.W))
val hartGoingWrEn = Wire(Bool())
val hartGoingId = Wire(UInt(sbIdWidth.W))
val hartResumingWrEn = Wire(Bool())
val hartResumingId = Wire(UInt(sbIdWidth.W))
val hartExceptionWrEn = Wire(Bool())
val hartExceptionId = Wire(UInt(sbIdWidth.W))
// progbuf and abstract data: byte-addressable control logic
// AccessLegal is set only when state = waiting
// RdEn and WrEnMaybe: control signals driven by the DMI bus
val dmiProgramBufferRdEn = WireInit(VecInit(Seq.fill(cfg.nProgramBufferWords * 4) {false.B} ))
val dmiProgramBufferAccessLegal = WireInit(false.B)
val dmiProgramBufferWrEnMaybe = WireInit(VecInit(Seq.fill(cfg.nProgramBufferWords * 4) {false.B} ))
val dmiAbstractDataRdEn = WireInit(VecInit(Seq.fill(cfg.nAbstractDataWords * 4) {false.B} ))
val dmiAbstractDataAccessLegal = WireInit(false.B)
val dmiAbstractDataWrEnMaybe = WireInit(VecInit(Seq.fill(cfg.nAbstractDataWords * 4) {false.B} ))
//--------------------------------------------------------------
// Registers coming from 'CONTROL' in Outer
//--------------------------------------------------------------
val dmAuthenticated = io.auth.map(a => a.dmAuthenticated).getOrElse(true.B)
val selectedHartReg = Reg(UInt(p(MaxHartIdBits).W))
// hamaskFull is a vector of all selected harts including hartsel, whether or not supportHartArray is true
val hamaskFull = WireInit(VecInit(Seq.fill(nComponents) {false.B} ))
if (nComponents > 1) {
when (~io.dmactive) {
selectedHartReg := 0.U
}.elsewhen (io.innerCtrl.fire){
selectedHartReg := io.innerCtrl.bits.hartsel
}
}
if (supportHartArray) {
val hamaskZero = WireInit(VecInit(Seq.fill(nComponents) {false.B} ))
val hamaskReg = Reg(Vec(nComponents, Bool()))
when (~io.dmactive || ~dmAuthenticated) {
hamaskReg := hamaskZero
}.elsewhen (io.innerCtrl.fire){
hamaskReg := Mux(io.innerCtrl.bits.hasel, io.innerCtrl.bits.hamask, hamaskZero)
}
hamaskFull := hamaskReg
}
// Outer.hamask doesn't consider the hart selected by dmcontrol.hartsello,
// so append it here
when (selectedHartReg < nComponents.U) {
hamaskFull(if (nComponents == 1) 0.U(0.W) else selectedHartReg) := true.B
}
io.innerCtrl.ready := true.B
// Construct a Vec from io.innerCtrl fields indicating whether each hart is being selected in this write
// A hart may be selected by hartsel field or by hart array
val hamaskWrSel = WireInit(VecInit(Seq.fill(nComponents) {false.B} ))
for (component <- 0 until nComponents ) {
hamaskWrSel(component) := ((io.innerCtrl.bits.hartsel === component.U) ||
(if (supportHartArray) io.innerCtrl.bits.hasel && io.innerCtrl.bits.hamask(component) else false.B))
}
//-------------------------------------
// Halt-on-reset logic
// hrmask is set in dmOuter and passed in
// Debug interrupt is generated when a reset occurs whose corresponding hrmask bit is set
// Debug interrupt is maintained until the hart enters halted state
//-------------------------------------
val hrReset = WireInit(VecInit(Seq.fill(nComponents) { false.B } ))
val hrDebugInt = Wire(Vec(nComponents, Bool()))
val hrmaskReg = RegInit(hrReset)
val hartIsInResetSync = Wire(Vec(nComponents, Bool()))
for (component <- 0 until nComponents) {
hartIsInResetSync(component) := AsyncResetSynchronizerShiftReg(io.hartIsInReset(component), 3, Some(s"debug_hartReset_$component"))
}
when (~io.dmactive || ~dmAuthenticated) {
hrmaskReg := hrReset
}.elsewhen (io.innerCtrl.fire){
hrmaskReg := io.innerCtrl.bits.hrmask
}
withReset(reset.asAsyncReset) { // ensure interrupt requests are negated at first clock edge
val hrDebugIntReg = RegInit(VecInit(Seq.fill(nComponents) { false.B } ))
when (~io.dmactive || ~dmAuthenticated) {
hrDebugIntReg := hrReset
}.otherwise {
hrDebugIntReg := hrmaskReg &
(hartIsInResetSync | // set debugInt during reset
(hrDebugIntReg & ~(haltedBitRegs.asBools))) // maintain until core halts
}
hrDebugInt := hrDebugIntReg
}
//--------------------------------------------------------------
// DMI Registers
//--------------------------------------------------------------
//----DMSTATUS
val DMSTATUSRdData = WireInit(0.U.asTypeOf(new DMSTATUSFields()))
DMSTATUSRdData.authenticated := dmAuthenticated
DMSTATUSRdData.version := 2.U // Version 0.13
io.auth.map(a => DMSTATUSRdData.authbusy := a.dmAuthBusy)
val resumereq = io.innerCtrl.fire && io.innerCtrl.bits.resumereq
when (dmAuthenticated) {
DMSTATUSRdData.hasresethaltreq := true.B
DMSTATUSRdData.anynonexistent := (selectedHartReg >= nComponents.U) // only hartsel can be nonexistent
// all harts nonexistent if hartsel is out of range and there are no harts selected in the hart array
DMSTATUSRdData.allnonexistent := (selectedHartReg >= nComponents.U) & (~hamaskFull.reduce(_ | _))
when (~DMSTATUSRdData.allnonexistent) { // if no existent harts selected, all other status is false
DMSTATUSRdData.anyunavail := (io.debugUnavail & hamaskFull).reduce(_ | _)
DMSTATUSRdData.anyhalted := ((~io.debugUnavail & (haltedBitRegs.asBools)) & hamaskFull).reduce(_ | _)
DMSTATUSRdData.anyrunning := ((~io.debugUnavail & ~(haltedBitRegs.asBools)) & hamaskFull).reduce(_ | _)
DMSTATUSRdData.anyhavereset := (haveResetBitRegs.asBools & hamaskFull).reduce(_ | _)
DMSTATUSRdData.anyresumeack := (resumeAcks.asBools & hamaskFull).reduce(_ | _)
when (~DMSTATUSRdData.anynonexistent) { // if one hart is nonexistent, no 'all' status is set
DMSTATUSRdData.allunavail := (io.debugUnavail | ~hamaskFull).reduce(_ & _)
DMSTATUSRdData.allhalted := ((~io.debugUnavail & (haltedBitRegs.asBools)) | ~hamaskFull).reduce(_ & _)
DMSTATUSRdData.allrunning := ((~io.debugUnavail & ~(haltedBitRegs.asBools)) | ~hamaskFull).reduce(_ & _)
DMSTATUSRdData.allhavereset := (haveResetBitRegs.asBools | ~hamaskFull).reduce(_ & _)
DMSTATUSRdData.allresumeack := (resumeAcks.asBools | ~hamaskFull).reduce(_ & _)
}
}
//TODO
DMSTATUSRdData.confstrptrvalid := false.B
DMSTATUSRdData.impebreak := (cfg.hasImplicitEbreak).B
}
when(~io.dmactive || ~dmAuthenticated) {
haveResetBitRegs := 0.U
}.otherwise {
when (io.innerCtrl.fire && io.innerCtrl.bits.ackhavereset) {
haveResetBitRegs := (haveResetBitRegs & (~(hamaskWrSel.asUInt))) | hartIsInResetSync.asUInt
}.otherwise {
haveResetBitRegs := haveResetBitRegs | hartIsInResetSync.asUInt
}
}
//----DMCS2 (Halt Groups)
val DMCS2RdData = WireInit(0.U.asTypeOf(new DMCS2Fields()))
val DMCS2WrData = WireInit(0.U.asTypeOf(new DMCS2Fields()))
val hgselectWrEn = WireInit(false.B)
val hgwriteWrEn = WireInit(false.B)
val haltgroupWrEn = WireInit(false.B)
val exttriggerWrEn = WireInit(false.B)
val hgDebugInt = WireInit(VecInit(Seq.fill(nComponents) {false.B} ))
if (nHaltGroups > 0) withReset (reset.asAsyncReset) { // async reset ensures triggers don't falsely fire during startup
val hgBits = log2Up(nHaltGroups)
// hgParticipate: Each entry indicates which hg that entity belongs to (1 to nHaltGroups). 0 means no hg assigned.
val hgParticipateHart = RegInit(VecInit(Seq.fill(nComponents)(0.U(hgBits.W))))
val hgParticipateTrig = if (nExtTriggers > 0) RegInit(VecInit(Seq.fill(nExtTriggers)(0.U(hgBits.W)))) else Nil
// assign group index to currently selected harts
for (component <- 0 until nComponents) {
when (~io.dmactive || ~dmAuthenticated) {
hgParticipateHart(component) := 0.U
}.otherwise {
when (haltgroupWrEn & DMCS2WrData.hgwrite & ~DMCS2WrData.hgselect &
hamaskFull(component) & (DMCS2WrData.haltgroup <= nHaltGroups.U)) {
hgParticipateHart(component) := DMCS2WrData.haltgroup
}
}
}
DMCS2RdData.haltgroup := hgParticipateHart(if (nComponents == 1) 0.U(0.W) else selectedHartReg)
if (nExtTriggers > 0) {
val hgSelect = Reg(Bool())
when (~io.dmactive || ~dmAuthenticated) {
hgSelect := false.B
}.otherwise {
when (hgselectWrEn) {
hgSelect := DMCS2WrData.hgselect
}
}
// assign group index to trigger
for (trigger <- 0 until nExtTriggers) {
when (~io.dmactive || ~dmAuthenticated) {
hgParticipateTrig(trigger) := 0.U
}.otherwise {
when (haltgroupWrEn & DMCS2WrData.hgwrite & DMCS2WrData.hgselect &
(DMCS2WrData.exttrigger === trigger.U) & (DMCS2WrData.haltgroup <= nHaltGroups.U)) {
hgParticipateTrig(trigger) := DMCS2WrData.haltgroup
}
}
}
DMCS2RdData.hgselect := hgSelect
when (hgSelect) {
DMCS2RdData.haltgroup := hgParticipateTrig(0)
}
// If there is only 1 ext trigger, then the exttrigger field is fixed at 0
// Otherwise, instantiate a register with only the number of bits required
if (nExtTriggers > 1) {
val trigBits = log2Up(nExtTriggers-1)
val hgExtTrigger = Reg(UInt(trigBits.W))
when (~io.dmactive || ~dmAuthenticated) {
hgExtTrigger := 0.U
}.otherwise {
when (exttriggerWrEn & (DMCS2WrData.exttrigger < nExtTriggers.U)) {
hgExtTrigger := DMCS2WrData.exttrigger
}
}
DMCS2RdData.exttrigger := hgExtTrigger
when (hgSelect) {
DMCS2RdData.haltgroup := hgParticipateTrig(hgExtTrigger)
}
}
}
// Halt group state machine
// IDLE: Go to FIRED when any hart in this hg writes to HALTED while its HaltedBitRegs=0
// or when any trigin assigned to this hg occurs
// FIRED: Back to IDLE when all harts in this hg have set their haltedBitRegs
// and all trig out in this hg have been acknowledged
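// Example: with harts 0 and 1 both assigned to halt group 1, hart 0 halting moves
// the group to FIRED, which asserts hgDebugInt to hart 1 (and any trigger outputs in
// the group); once both haltedBitRegs bits are set and all trigouts are acked, the
// group returns to IDLE.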
val hgFired = RegInit (VecInit(Seq.fill(nHaltGroups+1) {false.B} ))
val hgHartFiring = WireInit(VecInit(Seq.fill(nHaltGroups+1) {false.B} )) // which hg's are firing due to hart halting
val hgTrigFiring = WireInit(VecInit(Seq.fill(nHaltGroups+1) {false.B} )) // which hg's are firing due to trig in
val hgHartsAllHalted = WireInit(VecInit(Seq.fill(nHaltGroups+1) {false.B} )) // in which hg's have all harts halted
val hgTrigsAllAcked = WireInit(VecInit(Seq.fill(nHaltGroups+1) { true.B} )) // in which hg's have all trigouts been acked
io.extTrigger.foreach {extTrigger =>
val extTriggerInReq = Wire(Vec(nExtTriggers, Bool()))
val extTriggerOutAck = Wire(Vec(nExtTriggers, Bool()))
extTriggerInReq := extTrigger.in.req.asBools
extTriggerOutAck := extTrigger.out.ack.asBools
val trigInReq = ResetSynchronizerShiftReg(in=extTriggerInReq, sync=3, name=Some("dm_extTriggerInReqSync"))
val trigOutAck = ResetSynchronizerShiftReg(in=extTriggerOutAck, sync=3, name=Some("dm_extTriggerOutAckSync"))
for (hg <- 1 to nHaltGroups) {
hgTrigFiring(hg) := (trigInReq & ~RegNext(trigInReq) & hgParticipateTrig.map(_ === hg.U)).reduce(_ | _)
hgTrigsAllAcked(hg) := (trigOutAck | hgParticipateTrig.map(_ =/= hg.U)).reduce(_ & _)
}
extTrigger.in.ack := trigInReq.asUInt
}
for (hg <- 1 to nHaltGroups) {
hgHartFiring(hg) := hartHaltedWrEn & ~haltedBitRegs(hartHaltedId) & (hgParticipateHart(hartSelFuncs.hartIdToHartSel(hartHaltedId)) === hg.U)
hgHartsAllHalted(hg) := (haltedBitRegs.asBools | hgParticipateHart.map(_ =/= hg.U)).reduce(_ & _)
when (~io.dmactive || ~dmAuthenticated) {
hgFired(hg) := false.B
}.elsewhen (~hgFired(hg) & (hgHartFiring(hg) | hgTrigFiring(hg))) {
hgFired(hg) := true.B
}.elsewhen ( hgFired(hg) & hgHartsAllHalted(hg) & hgTrigsAllAcked(hg)) {
hgFired(hg) := false.B
}
}
// For each hg that has fired, assert debug interrupt to each hart in that hg
for (component <- 0 until nComponents) {
hgDebugInt(component) := hgFired(hgParticipateHart(component))
}
// For each hg that has fired, assert trigger out for all external triggers in that hg
io.extTrigger.foreach {extTrigger =>
val extTriggerOutReq = RegInit(VecInit(Seq.fill(cfg.nExtTriggers) {false.B} ))
for (trig <- 0 until nExtTriggers) {
extTriggerOutReq(trig) := hgFired(hgParticipateTrig(trig))
}
extTrigger.out.req := extTriggerOutReq.asUInt
}
}
io.hgDebugInt := hgDebugInt | hrDebugInt
//----HALTSUM*
val numHaltedStatus = ((nComponents - 1) / 32) + 1
val haltedStatus = Wire(Vec(numHaltedStatus, Bits(32.W)))
for (ii <- 0 until numHaltedStatus) {
when (dmAuthenticated) {
haltedStatus(ii) := haltedBitRegs >> (ii*32)
}.otherwise {
haltedStatus(ii) := 0.U
}
}
val haltedSummary = Cat(haltedStatus.map(_.orR).reverse)
val HALTSUM1RdData = haltedSummary.asTypeOf(new HALTSUM1Fields())
val selectedHaltedStatus = Mux((selectedHartReg >> 5) > numHaltedStatus.U, 0.U, haltedStatus(selectedHartReg >> 5))
val HALTSUM0RdData = selectedHaltedStatus.asTypeOf(new HALTSUM0Fields())
// Since we only support 1024 harts, we don't implement HALTSUM2 or HALTSUM3
//----ABSTRACTCS
val ABSTRACTCSReset = WireInit(0.U.asTypeOf(new ABSTRACTCSFields()))
ABSTRACTCSReset.datacount := cfg.nAbstractDataWords.U
ABSTRACTCSReset.progbufsize := cfg.nProgramBufferWords.U
val ABSTRACTCSReg = Reg(new ABSTRACTCSFields())
val ABSTRACTCSWrData = WireInit(0.U.asTypeOf(new ABSTRACTCSFields()))
val ABSTRACTCSRdData = WireInit(ABSTRACTCSReg)
val ABSTRACTCSRdEn = WireInit(false.B)
val ABSTRACTCSWrEnMaybe = WireInit(false.B)
val ABSTRACTCSWrEnLegal = WireInit(false.B)
val ABSTRACTCSWrEn = ABSTRACTCSWrEnMaybe && ABSTRACTCSWrEnLegal
// multiple error types
// see the implementation in the state machine part below
val errorBusy = WireInit(false.B)
val errorException = WireInit(false.B)
val errorUnsupported = WireInit(false.B)
val errorHaltResume = WireInit(false.B)
when (~io.dmactive || ~dmAuthenticated) {
ABSTRACTCSReg := ABSTRACTCSReset
}.otherwise {
when (errorBusy){
ABSTRACTCSReg.cmderr := DebugAbstractCommandError.ErrBusy.id.U
}.elsewhen (errorException) {
ABSTRACTCSReg.cmderr := DebugAbstractCommandError.ErrException.id.U
}.elsewhen (errorUnsupported) {
ABSTRACTCSReg.cmderr := DebugAbstractCommandError.ErrNotSupported.id.U
}.elsewhen (errorHaltResume) {
ABSTRACTCSReg.cmderr := DebugAbstractCommandError.ErrHaltResume.id.U
}.otherwise {
//W1C
when (ABSTRACTCSWrEn){
ABSTRACTCSReg.cmderr := ABSTRACTCSReg.cmderr & ~(ABSTRACTCSWrData.cmderr);
}
}
}
// For busy, see below state machine.
val abstractCommandBusy = WireInit(true.B)
ABSTRACTCSRdData.busy := abstractCommandBusy
when (~dmAuthenticated) { // read value must be 0 when not authenticated
ABSTRACTCSRdData.datacount := 0.U
ABSTRACTCSRdData.progbufsize := 0.U
}
//---- ABSTRACTAUTO
// It is a mask indicating whether the DATAi/PROGBUFi registers have autoexecution permission.
// This part produces 3 wires: autoexecData, autoexecProg, autoexec.
// The first two specify which registers support autoexec;
// autoexec is a control signal meaning at least one enabled autoexec register was accessed.
// When autoexec is set, generate instructions using the COMMAND register.
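// Example: if autoexecdata bit 0 is set, every subsequent DMI access to DATA0
// re-issues the abstract command currently held in COMMAND.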
val ABSTRACTAUTOReset = WireInit(0.U.asTypeOf(new ABSTRACTAUTOFields()))
val ABSTRACTAUTOReg = Reg(new ABSTRACTAUTOFields())
val ABSTRACTAUTOWrData = WireInit(0.U.asTypeOf(new ABSTRACTAUTOFields()))
val ABSTRACTAUTORdData = WireInit(ABSTRACTAUTOReg)
val ABSTRACTAUTORdEn = WireInit(false.B)
val autoexecdataWrEnMaybe = WireInit(false.B)
val autoexecprogbufWrEnMaybe = WireInit(false.B)
val ABSTRACTAUTOWrEnLegal = WireInit(false.B)
when (~io.dmactive || ~dmAuthenticated) {
ABSTRACTAUTOReg := ABSTRACTAUTOReset
}.otherwise {
when (autoexecprogbufWrEnMaybe && ABSTRACTAUTOWrEnLegal) {
ABSTRACTAUTOReg.autoexecprogbuf := ABSTRACTAUTOWrData.autoexecprogbuf & ( (1 << cfg.nProgramBufferWords) - 1).U
}
when (autoexecdataWrEnMaybe && ABSTRACTAUTOWrEnLegal) {
ABSTRACTAUTOReg.autoexecdata := ABSTRACTAUTOWrData.autoexecdata & ( (1 << cfg.nAbstractDataWords) - 1).U
}
}
// Abstract Data access vector(byte-addressable)
val dmiAbstractDataAccessVec = WireInit(VecInit(Seq.fill(cfg.nAbstractDataWords * 4) {false.B} ))
dmiAbstractDataAccessVec := (dmiAbstractDataWrEnMaybe zip dmiAbstractDataRdEn).map{ case (r,w) => r | w}
// Program Buffer access vector(byte-addressable)
val dmiProgramBufferAccessVec = WireInit(VecInit(Seq.fill(cfg.nProgramBufferWords * 4) {false.B} ))
dmiProgramBufferAccessVec := (dmiProgramBufferWrEnMaybe zip dmiProgramBufferRdEn).map{ case (r,w) => r | w}
// at least one word access
val dmiAbstractDataAccess = dmiAbstractDataAccessVec.reduce(_ || _ )
val dmiProgramBufferAccess = dmiProgramBufferAccessVec.reduce(_ || _)
// This will take the shorter of the lists, which is what we want.
val autoexecData = WireInit(VecInit(Seq.fill(cfg.nAbstractDataWords) {false.B} ))
val autoexecProg = WireInit(VecInit(Seq.fill(cfg.nProgramBufferWords) {false.B} ))
(autoexecData zip ABSTRACTAUTOReg.autoexecdata.asBools).zipWithIndex.foreach {case (t, i) => t._1 := dmiAbstractDataAccessVec(i * 4) && t._2 }
(autoexecProg zip ABSTRACTAUTOReg.autoexecprogbuf.asBools).zipWithIndex.foreach {case (t, i) => t._1 := dmiProgramBufferAccessVec(i * 4) && t._2}
val autoexec = autoexecData.reduce(_ || _) || autoexecProg.reduce(_ || _)
//---- COMMAND
val COMMANDReset = WireInit(0.U.asTypeOf(new COMMANDFields()))
val COMMANDReg = Reg(new COMMANDFields())
val COMMANDWrDataVal = WireInit(0.U(32.W))
val COMMANDWrData = WireInit(COMMANDWrDataVal.asTypeOf(new COMMANDFields()))
val COMMANDWrEnMaybe = WireInit(false.B)
val COMMANDWrEnLegal = WireInit(false.B)
val COMMANDRdEn = WireInit(false.B)
val COMMANDWrEn = COMMANDWrEnMaybe && COMMANDWrEnLegal
val COMMANDRdData = COMMANDReg
when (~io.dmactive || ~dmAuthenticated) {
COMMANDReg := COMMANDReset
}.otherwise {
when (COMMANDWrEn) {
COMMANDReg := COMMANDWrData
}
}
// --- Abstract Data
// These are byte-addressable, so that the processor can use
// byte-addressable instructions to store to them.
val abstractDataMem = Reg(Vec(cfg.nAbstractDataWords*4, UInt(8.W)))
val abstractDataNxt = WireInit(abstractDataMem)
// --- Program Buffer
// byte-addressable memory
val programBufferMem = Reg(Vec(cfg.nProgramBufferWords*4, UInt(8.W)))
val programBufferNxt = WireInit(programBufferMem)
//--------------------------------------------------------------
// These bits are implementation-specific bits set
// by harts executing code.
//--------------------------------------------------------------
// Run control logic
when (~io.dmactive || ~dmAuthenticated) {
haltedBitRegs := 0.U
resumeReqRegs := 0.U
}.otherwise {
//remove those harts in reset
resumeReqRegs := resumeReqRegs & ~(hartIsInResetSync.asUInt)
val hartHaltedIdIndex = UIntToOH(hartSelFuncs.hartIdToHartSel(hartHaltedId))
val hartResumingIdIndex = UIntToOH(hartSelFuncs.hartIdToHartSel(hartResumingId))
val hartselIndex = UIntToOH(io.innerCtrl.bits.hartsel)
when (hartHaltedWrEn) {
// add those harts halting and remove those in reset
haltedBitRegs := (haltedBitRegs | hartHaltedIdIndex) & ~(hartIsInResetSync.asUInt)
}.elsewhen (hartResumingWrEn) {
// remove those harts in reset and those in resume
haltedBitRegs := (haltedBitRegs & ~(hartResumingIdIndex)) & ~(hartIsInResetSync.asUInt)
}.otherwise {
// remove those harts in reset
haltedBitRegs := haltedBitRegs & ~(hartIsInResetSync.asUInt)
}
when (hartResumingWrEn) {
// remove those harts in resume and those in reset
resumeReqRegs := (resumeReqRegs & ~(hartResumingIdIndex)) & ~(hartIsInResetSync.asUInt)
}
when (resumereq) {
// set all selected harts to resumeReq, remove those in reset
resumeReqRegs := (resumeReqRegs | hamaskWrSel.asUInt) & ~(hartIsInResetSync.asUInt)
}
}
when (resumereq) {
// next cycle resumeAcks will be the negation of next cycle resumeReqRegs
resumeAcks := (~resumeReqRegs & ~(hamaskWrSel.asUInt))
}.otherwise {
resumeAcks := ~resumeReqRegs
}
//---- AUTHDATA
val authRdEnMaybe = WireInit(false.B)
val authWrEnMaybe = WireInit(false.B)
io.auth.map { a =>
a.dmactive := io.dmactive
a.dmAuthRead := authRdEnMaybe & ~a.dmAuthBusy
a.dmAuthWrite := authWrEnMaybe & ~a.dmAuthBusy
}
val dmstatusRegFields = RegFieldGroup("dmi_dmstatus", Some("debug module status register"), Seq(
RegField.r(4, DMSTATUSRdData.version, RegFieldDesc("version", "version", reset=Some(2))),
RegField.r(1, DMSTATUSRdData.confstrptrvalid, RegFieldDesc("confstrptrvalid", "confstrptrvalid", reset=Some(0))),
RegField.r(1, DMSTATUSRdData.hasresethaltreq, RegFieldDesc("hasresethaltreq", "hasresethaltreq", reset=Some(1))),
RegField.r(1, DMSTATUSRdData.authbusy, RegFieldDesc("authbusy", "authbusy", reset=Some(0))),
RegField.r(1, DMSTATUSRdData.authenticated, RegFieldDesc("authenticated", "authenticated", reset=Some(1))),
RegField.r(1, DMSTATUSRdData.anyhalted, RegFieldDesc("anyhalted", "anyhalted", reset=Some(0))),
RegField.r(1, DMSTATUSRdData.allhalted, RegFieldDesc("allhalted", "allhalted", reset=Some(0))),
RegField.r(1, DMSTATUSRdData.anyrunning, RegFieldDesc("anyrunning", "anyrunning", reset=Some(1))),
RegField.r(1, DMSTATUSRdData.allrunning, RegFieldDesc("allrunning", "allrunning", reset=Some(1))),
RegField.r(1, DMSTATUSRdData.anyunavail, RegFieldDesc("anyunavail", "anyunavail", reset=Some(0))),
RegField.r(1, DMSTATUSRdData.allunavail, RegFieldDesc("allunavail", "allunavail", reset=Some(0))),
RegField.r(1, DMSTATUSRdData.anynonexistent, RegFieldDesc("anynonexistent", "anynonexistent", reset=Some(0))),
RegField.r(1, DMSTATUSRdData.allnonexistent, RegFieldDesc("allnonexistent", "allnonexistent", reset=Some(0))),
RegField.r(1, DMSTATUSRdData.anyresumeack, RegFieldDesc("anyresumeack", "anyresumeack", reset=Some(1))),
RegField.r(1, DMSTATUSRdData.allresumeack, RegFieldDesc("allresumeack", "allresumeack", reset=Some(1))),
RegField.r(1, DMSTATUSRdData.anyhavereset, RegFieldDesc("anyhavereset", "anyhavereset", reset=Some(0))),
RegField.r(1, DMSTATUSRdData.allhavereset, RegFieldDesc("allhavereset", "allhavereset", reset=Some(0))),
RegField(2),
RegField.r(1, DMSTATUSRdData.impebreak, RegFieldDesc("impebreak", "impebreak", reset=Some(if (cfg.hasImplicitEbreak) 1 else 0)))
))
val dmcs2RegFields = RegFieldGroup("dmi_dmcs2", Some("debug module control/status register 2"), Seq(
WNotifyVal(1, DMCS2RdData.hgselect, DMCS2WrData.hgselect, hgselectWrEn,
RegFieldDesc("hgselect", "select halt groups or external triggers", reset=Some(0), volatile=true)),
WNotifyVal(1, 0.U, DMCS2WrData.hgwrite, hgwriteWrEn,
RegFieldDesc("hgwrite", "write 1 to change halt groups", reset=None, access=RegFieldAccessType.W)),
WNotifyVal(5, DMCS2RdData.haltgroup, DMCS2WrData.haltgroup, haltgroupWrEn,
RegFieldDesc("haltgroup", "halt group", reset=Some(0), volatile=true)),
if (nExtTriggers > 1)
WNotifyVal(4, DMCS2RdData.exttrigger, DMCS2WrData.exttrigger, exttriggerWrEn,
RegFieldDesc("exttrigger", "external trigger select", reset=Some(0), volatile=true))
else RegField(4)
))
val abstractcsRegFields = RegFieldGroup("dmi_abstractcs", Some("abstract command control/status"), Seq(
RegField.r(4, ABSTRACTCSRdData.datacount, RegFieldDesc("datacount", "number of DATA registers", reset=Some(cfg.nAbstractDataWords))),
RegField(4),
WNotifyVal(3, ABSTRACTCSRdData.cmderr, ABSTRACTCSWrData.cmderr, ABSTRACTCSWrEnMaybe,
RegFieldDesc("cmderr", "command error", reset=Some(0), wrType=Some(RegFieldWrType.ONE_TO_CLEAR))),
RegField(1),
RegField.r(1, ABSTRACTCSRdData.busy, RegFieldDesc("busy", "busy", reset=Some(0))),
RegField(11),
RegField.r(5, ABSTRACTCSRdData.progbufsize, RegFieldDesc("progbufsize", "number of PROGBUF registers", reset=Some(cfg.nProgramBufferWords)))
))
val (sbcsFields, sbAddrFields, sbDataFields):
(Seq[RegField], Seq[Seq[RegField]], Seq[Seq[RegField]]) = sb2tlOpt.map{ sb2tl =>
SystemBusAccessModule(sb2tl, io.dmactive, dmAuthenticated)(p)
}.getOrElse((Seq.empty[RegField], Seq.fill[Seq[RegField]](4)(Seq.empty[RegField]), Seq.fill[Seq[RegField]](4)(Seq.empty[RegField])))
//--------------------------------------------------------------
// Program Buffer Access (DMI ... System Bus can override)
//--------------------------------------------------------------
val omRegMap = dmiNode.regmap(
(DMI_DMSTATUS << 2) -> dmstatusRegFields,
//TODO (DMI_CFGSTRADDR0 << 2) -> cfgStrAddrFields,
(DMI_DMCS2 << 2) -> (if (nHaltGroups > 0) dmcs2RegFields else Nil),
(DMI_HALTSUM0 << 2) -> RegFieldGroup("dmi_haltsum0", Some("Halt Summary 0"),
Seq(RegField.r(32, HALTSUM0RdData.asUInt, RegFieldDesc("dmi_haltsum0", "halt summary 0")))),
(DMI_HALTSUM1 << 2) -> RegFieldGroup("dmi_haltsum1", Some("Halt Summary 1"),
Seq(RegField.r(32, HALTSUM1RdData.asUInt, RegFieldDesc("dmi_haltsum1", "halt summary 1")))),
(DMI_ABSTRACTCS << 2) -> abstractcsRegFields,
(DMI_ABSTRACTAUTO<< 2) -> RegFieldGroup("dmi_abstractauto", Some("abstract command autoexec"), Seq(
WNotifyVal(cfg.nAbstractDataWords, ABSTRACTAUTORdData.autoexecdata, ABSTRACTAUTOWrData.autoexecdata, autoexecdataWrEnMaybe,
RegFieldDesc("autoexecdata", "abstract command data autoexec", reset=Some(0))),
RegField(16-cfg.nAbstractDataWords),
WNotifyVal(cfg.nProgramBufferWords, ABSTRACTAUTORdData.autoexecprogbuf, ABSTRACTAUTOWrData.autoexecprogbuf, autoexecprogbufWrEnMaybe,
RegFieldDesc("autoexecprogbuf", "abstract command progbuf autoexec", reset=Some(0))))),
(DMI_COMMAND << 2) -> RegFieldGroup("dmi_command", Some("Abstract Command Register"),
Seq(RWNotify(32, COMMANDRdData.asUInt, COMMANDWrDataVal, COMMANDRdEn, COMMANDWrEnMaybe,
Some(RegFieldDesc("dmi_command", "abstract command register", reset=Some(0), volatile=true))))),
(DMI_DATA0 << 2) -> RegFieldGroup("dmi_data", Some("abstract command data registers"), abstractDataMem.zipWithIndex.map{case (x, i) =>
RWNotify(8, Mux(dmAuthenticated, x, 0.U), abstractDataNxt(i),
dmiAbstractDataRdEn(i),
dmiAbstractDataWrEnMaybe(i),
Some(RegFieldDesc(s"dmi_data_$i", s"abstract command data register $i", reset = Some(0), volatile=true)))}, false),
(DMI_PROGBUF0 << 2) -> RegFieldGroup("dmi_progbuf", Some("abstract command progbuf registers"), programBufferMem.zipWithIndex.map{case (x, i) =>
RWNotify(8, Mux(dmAuthenticated, x, 0.U), programBufferNxt(i),
dmiProgramBufferRdEn(i),
dmiProgramBufferWrEnMaybe(i),
Some(RegFieldDesc(s"dmi_progbuf_$i", s"abstract command progbuf register $i", reset = Some(0))))}, false),
(DMI_AUTHDATA << 2) -> (if (cfg.hasAuthentication) RegFieldGroup("dmi_authdata", Some("authentication data exchange register"),
Seq(RWNotify(32, io.auth.get.dmAuthRdata, io.auth.get.dmAuthWdata, authRdEnMaybe, authWrEnMaybe,
Some(RegFieldDesc("authdata", "authentication data exchange", volatile=true))))) else Nil),
(DMI_SBCS << 2) -> sbcsFields,
(DMI_SBDATA0 << 2) -> sbDataFields(0),
(DMI_SBDATA1 << 2) -> sbDataFields(1),
(DMI_SBDATA2 << 2) -> sbDataFields(2),
(DMI_SBDATA3 << 2) -> sbDataFields(3),
(DMI_SBADDRESS0 << 2) -> sbAddrFields(0),
(DMI_SBADDRESS1 << 2) -> sbAddrFields(1),
(DMI_SBADDRESS2 << 2) -> sbAddrFields(2),
(DMI_SBADDRESS3 << 2) -> sbAddrFields(3)
)
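    // Note (added for clarity, not in the original source): DMI register numbers such as
    // DMI_DMSTATUS are word indices, so each one is shifted left by 2 above to form the
    // byte offset expected by regmap.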
// Abstract data mem is written by both the tile link interface and DMI...
abstractDataMem.zipWithIndex.foreach { case (x, i) =>
when (dmAuthenticated && dmiAbstractDataWrEnMaybe(i) && dmiAbstractDataAccessLegal) {
x := abstractDataNxt(i)
}
}
// ... and also by custom register read (if implemented)
val (customs, customParams) = customNode.in.unzip
val needCustom = (customs.size > 0) && (customParams.head.addrs.size > 0)
def getNeedCustom = () => needCustom
if (needCustom) {
val (custom, customP) = customNode.in.head
require(customP.width % 8 == 0, s"Debug Custom width must be divisible by 8, not ${customP.width}")
val custom_data = custom.data.asBools
val custom_bytes = Seq.tabulate(customP.width/8){i => custom_data.slice(i*8, (i+1)*8).asUInt}
when (custom.ready && custom.valid) {
(abstractDataMem zip custom_bytes).zipWithIndex.foreach {case ((a, b), i) =>
a := b
}
}
}
programBufferMem.zipWithIndex.foreach { case (x, i) =>
when (dmAuthenticated && dmiProgramBufferWrEnMaybe(i) && dmiProgramBufferAccessLegal) {
x := programBufferNxt(i)
}
}
//--------------------------------------------------------------
// "Variable" ROM Generation
//--------------------------------------------------------------
val goReg = Reg(Bool())
val goAbstract = WireInit(false.B)
val goCustom = WireInit(false.B)
val jalAbstract = WireInit(Instructions.JAL.value.U.asTypeOf(new GeneratedUJ()))
jalAbstract.setImm(ABSTRACT(cfg) - WHERETO)
when (~io.dmactive){
goReg := false.B
}.otherwise {
when (goAbstract) {
goReg := true.B
}.elsewhen (hartGoingWrEn){
assert(hartGoingId === 0.U, "Unexpected 'GOING' hart.")//Chisel3 #540 %x, expected %x", hartGoingId, 0.U)
goReg := false.B
}
}
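    // Handshake note (added, not in the original source): goAbstract sets goReg when an
    // abstract command is launched; the selected hart sees the 'go' flag, starts executing
    // from WHERETO, and writes zero to the GOING address, which asserts hartGoingWrEn and
    // clears goReg again.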
class flagBundle extends Bundle {
val reserved = UInt(6.W)
val resume = Bool()
val go = Bool()
}
val flags = WireInit(VecInit(Seq.fill(1 << selectedHartReg.getWidth) {0.U.asTypeOf(new flagBundle())} ))
assert ((hartSelFuncs.hartSelToHartId(selectedHartReg) < flags.size.U),
s"HartSel to HartId Mapping is illegal for this Debug Implementation, because HartID must be < ${flags.size} for it to work.")
flags(hartSelFuncs.hartSelToHartId(selectedHartReg)).go := goReg
for (component <- 0 until nComponents) {
val componentSel = WireInit(component.U)
flags(hartSelFuncs.hartSelToHartId(componentSel)).resume := resumeReqRegs(component)
}
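    // Layout note (added, not in the original source): each hart polls one byte of this
    // flags array at address FLAGS + hartId; with the flagBundle above, bit 0 of that byte
    // is 'go' and bit 1 is 'resume'.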
//----------------------------
// Abstract Command Decoding & Generation
//----------------------------
val accessRegisterCommandWr = WireInit(COMMANDWrData.asUInt.asTypeOf(new ACCESS_REGISTERFields()))
/** real COMMAND*/
val accessRegisterCommandReg = WireInit(COMMANDReg.asUInt.asTypeOf(new ACCESS_REGISTERFields()))
// TODO: Quick Access
class GeneratedI extends Bundle {
val imm = UInt(12.W)
val rs1 = UInt(5.W)
val funct3 = UInt(3.W)
val rd = UInt(5.W)
val opcode = UInt(7.W)
}
class GeneratedS extends Bundle {
val immhi = UInt(7.W)
val rs2 = UInt(5.W)
val rs1 = UInt(5.W)
val funct3 = UInt(3.W)
val immlo = UInt(5.W)
val opcode = UInt(7.W)
}
class GeneratedCSR extends Bundle {
val imm = UInt(12.W)
val rs1 = UInt(5.W)
val funct3 = UInt(3.W)
val rd = UInt(5.W)
val opcode = UInt(7.W)
}
class GeneratedUJ extends Bundle {
val imm3 = UInt(1.W)
val imm0 = UInt(10.W)
val imm1 = UInt(1.W)
val imm2 = UInt(8.W)
val rd = UInt(5.W)
val opcode = UInt(7.W)
def setImm(imm: Int) : Unit = {
// TODO: Check bounds of imm.
require(imm % 2 == 0, "Immediate must be even for UJ encoding.")
val immWire = WireInit(imm.S(21.W))
val immBits = WireInit(VecInit(immWire.asBools))
imm0 := immBits.slice(1, 1 + 10).asUInt
imm1 := immBits.slice(11, 11 + 11).asUInt
imm2 := immBits.slice(12, 12 + 8).asUInt
imm3 := immBits.slice(20, 20 + 1).asUInt
}
}
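    // Worked sketch (hypothetical helper, not part of the original file): since bundle
    // fields concatenate MSB-first, {imm3, imm0, imm1, imm2} land in bits [31:12] in the
    // JAL order imm[20 | 10:1 | 11 | 19:12]. The same packing in plain Scala, for an even,
    // in-range offset:
    //   def packUJImm(off: Int): Int =
    //     (((off >> 20) & 0x1)   << 31) |  // imm3 = imm[20]
    //     (((off >> 1)  & 0x3FF) << 21) |  // imm0 = imm[10:1]
    //     (((off >> 11) & 0x1)   << 20) |  // imm1 = imm[11]
    //     (((off >> 12) & 0xFF)  << 12)    // imm2 = imm[19:12]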
require((cfg.atzero && cfg.nAbstractInstructions == 2) || (!cfg.atzero && cfg.nAbstractInstructions == 5),
"Mismatch between DebugModuleParams atzero and nAbstractInstructions")
val abstractGeneratedMem = Reg(Vec(cfg.nAbstractInstructions, (UInt(32.W))))
def abstractGeneratedI(cfg: DebugModuleParams): UInt = {
val inst = Wire(new GeneratedI())
val offset = if (cfg.atzero) DATA else (DATA-0x800) & 0xFFF
val base = if (cfg.atzero) 0.U else Mux(accessRegisterCommandReg.regno(0), 8.U, 9.U)
inst.opcode := (Instructions.LW.value.U.asTypeOf(new GeneratedI())).opcode
inst.rd := (accessRegisterCommandReg.regno & 0x1F.U)
inst.funct3 := accessRegisterCommandReg.size
inst.rs1 := base
inst.imm := offset.U
inst.asUInt
}
def abstractGeneratedS(cfg: DebugModuleParams): UInt = {
val inst = Wire(new GeneratedS())
val offset = if (cfg.atzero) DATA else (DATA-0x800) & 0xFFF
val base = if (cfg.atzero) 0.U else Mux(accessRegisterCommandReg.regno(0), 8.U, 9.U)
inst.opcode := (Instructions.SW.value.U.asTypeOf(new GeneratedS())).opcode
inst.immlo := (offset & 0x1F).U
inst.funct3 := accessRegisterCommandReg.size
inst.rs1 := base
inst.rs2 := (accessRegisterCommandReg.regno & 0x1F.U)
inst.immhi := (offset >> 5).U
inst.asUInt
}
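    // Example (illustrative, not in the original source): with cfg.atzero = true and an
    // Access Register command for regno 0x1008 (x8/s0) with size = 2, abstractGeneratedI
    // yields "lw s0, DATA(x0)" (register write) and abstractGeneratedS yields
    // "sw s0, DATA(x0)" (register read); with atzero = false the base becomes s0 or s1
    // (swapped in via dscratch1) and the offset becomes (DATA - 0x800) & 0xFFF.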
def abstractGeneratedCSR: UInt = {
val inst = Wire(new GeneratedCSR())
val base = Mux(accessRegisterCommandReg.regno(0), 8.U, 9.U) // use s0 as base for odd regs, s1 as base for even regs
inst := (Instructions.CSRRW.value.U.asTypeOf(new GeneratedCSR()))
inst.imm := CSRs.dscratch1.U
inst.rs1 := base
inst.rd := base
inst.asUInt
}
val nop = Wire(new GeneratedI())
nop := Instructions.ADDI.value.U.asTypeOf(new GeneratedI())
nop.rd := 0.U
nop.rs1 := 0.U
nop.imm := 0.U
val isa = Wire(new GeneratedI())
isa := Instructions.ADDIW.value.U.asTypeOf(new GeneratedI())
isa.rd := 0.U
isa.rs1 := 0.U
isa.imm := 0.U
when (goAbstract) {
if (cfg.nAbstractInstructions == 2) {
// ABSTRACT(0): Transfer: LW or SW, else NOP
// ABSTRACT(1): Postexec: NOP else EBREAK
abstractGeneratedMem(0) := Mux(accessRegisterCommandReg.transfer,
Mux(accessRegisterCommandReg.write, abstractGeneratedI(cfg), abstractGeneratedS(cfg)),
nop.asUInt
)
abstractGeneratedMem(1) := Mux(accessRegisterCommandReg.postexec,
nop.asUInt,
Instructions.EBREAK.value.U)
} else {
// Entry: All regs in GPRs, dscratch1=offset 0x800 in DM
// ABSTRACT(0): CheckISA: ADDW or NOP (exception here if size=3 and not RV64)
// ABSTRACT(1): CSRRW s1,dscratch1,s1 or CSRRW s0,dscratch1,s0
// ABSTRACT(2): Transfer: LW, SW, LD, SD else NOP
// ABSTRACT(3): CSRRW s1,dscratch1,s1 or CSRRW s0,dscratch1,s0
// ABSTRACT(4): Postexec: NOP else EBREAK
abstractGeneratedMem(0) := Mux(accessRegisterCommandReg.transfer && accessRegisterCommandReg.size =/= 2.U, isa.asUInt, nop.asUInt)
abstractGeneratedMem(1) := abstractGeneratedCSR
abstractGeneratedMem(2) := Mux(accessRegisterCommandReg.transfer,
Mux(accessRegisterCommandReg.write, abstractGeneratedI(cfg), abstractGeneratedS(cfg)),
nop.asUInt
)
abstractGeneratedMem(3) := abstractGeneratedCSR
abstractGeneratedMem(4) := Mux(accessRegisterCommandReg.postexec,
nop.asUInt,
Instructions.EBREAK.value.U)
}
}
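    // Illustrative sequence (added note, not in the original source): with atzero = false,
    // a register read of x8 (regno = 0x1008, size = 3, transfer = 1, postexec = 0) fills
    // the five slots roughly as:
    //   addiw x0, x0, 0              // ISA check: traps on RV32, flagging the bad size
    //   csrrw s1, dscratch1, s1      // swap in the DM data base pointer
    //   sd    s0, (DATA-0x800)(s1)   // transfer x8 out to the DATA words
    //   csrrw s1, dscratch1, s1      // restore s1
    //   ebreak                       // no postexec, so return to the debug ROM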
//--------------------------------------------------------------
// Drive Custom Access
//--------------------------------------------------------------
if (needCustom) {
val (custom, customP) = customNode.in.head
custom.addr := accessRegisterCommandReg.regno
custom.valid := goCustom
}
//--------------------------------------------------------------
// Hart Bus Access
//--------------------------------------------------------------
tlNode.regmap(
// This memory is writable.
HALTED -> Seq(WNotifyWire(sbIdWidth, hartHaltedId, hartHaltedWrEn,
"debug_hart_halted", "Debug ROM Causes hart to write its hartID here when it is in Debug Mode.")),
GOING -> Seq(WNotifyWire(sbIdWidth, hartGoingId, hartGoingWrEn,
"debug_hart_going", "Debug ROM causes hart to write 0 here when it begins executing Debug Mode instructions.")),
RESUMING -> Seq(WNotifyWire(sbIdWidth, hartResumingId, hartResumingWrEn,
"debug_hart_resuming", "Debug ROM causes hart to write its hartID here when it leaves Debug Mode.")),
EXCEPTION -> Seq(WNotifyWire(sbIdWidth, hartExceptionId, hartExceptionWrEn,
"debug_hart_exception", "Debug ROM causes hart to write 0 here if it gets an exception in Debug Mode.")),
DATA -> RegFieldGroup("debug_data", Some("Data used to communicate with Debug Module"),
abstractDataMem.zipWithIndex.map {case (x, i) => RegField(8, x, RegFieldDesc(s"debug_data_$i", ""))}),
PROGBUF(cfg)-> RegFieldGroup("debug_progbuf", Some("Program buffer used to communicate with Debug Module"),
programBufferMem.zipWithIndex.map {case (x, i) => RegField(8, x, RegFieldDesc(s"debug_progbuf_$i", ""))}),
// These sections are read-only.
IMPEBREAK(cfg)-> {if (cfg.hasImplicitEbreak) Seq(RegField.r(32, Instructions.EBREAK.value.U,
RegFieldDesc("debug_impebreak", "Debug Implicit EBREAK", reset=Some(Instructions.EBREAK.value)))) else Nil},
WHERETO -> Seq(RegField.r(32, jalAbstract.asUInt, RegFieldDesc("debug_whereto", "Instruction filled in by Debug Module to control hart in Debug Mode", volatile = true))),
ABSTRACT(cfg) -> RegFieldGroup("debug_abstract", Some("Instructions generated by Debug Module"),
abstractGeneratedMem.zipWithIndex.map{ case (x,i) => RegField.r(32, x, RegFieldDesc(s"debug_abstract_$i", "", volatile=true))}),
FLAGS -> RegFieldGroup("debug_flags", Some("Memory region used to control hart going/resuming in Debug Mode"),
if (nComponents == 1) {
Seq.tabulate(1024) { i => RegField.r(8, flags(0).asUInt, RegFieldDesc(s"debug_flags_$i", "", volatile=true)) }
} else {
flags.zipWithIndex.map{case(x, i) => RegField.r(8, x.asUInt, RegFieldDesc(s"debug_flags_$i", "", volatile=true))}
}),
ROMBASE -> RegFieldGroup("debug_rom", Some("Debug ROM"),
(if (cfg.atzero) DebugRomContents() else DebugRomNonzeroContents()).zipWithIndex.map{case (x, i) =>
RegField.r(8, (x & 0xFF).U(8.W), RegFieldDesc(s"debug_rom_$i", "", reset=Some(x)))})
)
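    // Note (added for clarity, not in the original source): when nComponents == 1 the single
    // flag byte is replicated across the entire 1 KiB FLAGS window above, so the ROM's
    // hartId-indexed load reads hart 0's go/resume flags regardless of the reported hartid.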
// Override System Bus accesses with dmactive reset.
when (~io.dmactive){
abstractDataMem.foreach {x => x := 0.U}
programBufferMem.foreach {x => x := 0.U}
}
//--------------------------------------------------------------
// Abstract Command State Machine
//--------------------------------------------------------------
object CtrlState extends scala.Enumeration {
type CtrlState = Value
val Waiting, CheckGenerate, Exec, Custom = Value
def apply( t : Value) : UInt = {
t.id.U(log2Up(values.size).W)
}
}
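    // Encoding note (added, not in the original source): with four states the apply method
    // above produces 2-bit codes, e.g. CtrlState(Waiting) = 0.U and CtrlState(Exec) = 2.U.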
import CtrlState._
// This is not an initialization!
val ctrlStateReg = Reg(chiselTypeOf(CtrlState(Waiting)))
val hartHalted = haltedBitRegs(if (nComponents == 1) 0.U(0.W) else selectedHartReg)
val ctrlStateNxt = WireInit(ctrlStateReg)
//------------------------
// DMI Register Control and Status
abstractCommandBusy := (ctrlStateReg =/= CtrlState(Waiting))
ABSTRACTCSWrEnLegal := (ctrlStateReg === CtrlState(Waiting))
COMMANDWrEnLegal := (ctrlStateReg === CtrlState(Waiting))
ABSTRACTAUTOWrEnLegal := (ctrlStateReg === CtrlState(Waiting))
dmiAbstractDataAccessLegal := (ctrlStateReg === CtrlState(Waiting))
dmiProgramBufferAccessLegal := (ctrlStateReg === CtrlState(Waiting))
errorBusy := (ABSTRACTCSWrEnMaybe && ~ABSTRACTCSWrEnLegal) ||
(autoexecdataWrEnMaybe && ~ABSTRACTAUTOWrEnLegal) ||
(autoexecprogbufWrEnMaybe && ~ABSTRACTAUTOWrEnLegal) ||
(COMMANDWrEnMaybe && ~COMMANDWrEnLegal) ||
(dmiAbstractDataAccess && ~dmiAbstractDataAccessLegal) ||
(dmiProgramBufferAccess && ~dmiProgramBufferAccessLegal)
// TODO: Maybe Quick Access
val commandWrIsAccessRegister = (COMMANDWrData.cmdtype === DebugAbstractCommandType.AccessRegister.id.U)
val commandRegIsAccessRegister = (COMMANDReg.cmdtype === DebugAbstractCommandType.AccessRegister.id.U)
val commandWrIsUnsupported = COMMANDWrEn && !commandWrIsAccessRegister
val commandRegIsUnsupported = WireInit(true.B)
val commandRegBadHaltResume = WireInit(false.B)
// We only support abstract commands for GPRs and any custom registers, if specified.
val accessRegIsLegalSize = (accessRegisterCommandReg.size === 2.U) || (accessRegisterCommandReg.size === 3.U)
val accessRegIsGPR = (accessRegisterCommandReg.regno >= 0x1000.U && accessRegisterCommandReg.regno <= 0x101F.U) && accessRegIsLegalSize
val accessRegIsCustom = if (needCustom) {
val (custom, customP) = customNode.in.head
customP.addrs.foldLeft(false.B){
(result, current) => result || (current.U === accessRegisterCommandReg.regno)}
} else false.B
when (commandRegIsAccessRegister) {
when (accessRegIsCustom && accessRegisterCommandReg.transfer && accessRegisterCommandReg.write === false.B) {
commandRegIsUnsupported := false.B
}.elsewhen (!accessRegisterCommandReg.transfer || accessRegIsGPR) {
commandRegIsUnsupported := false.B
commandRegBadHaltResume := ~hartHalted
}
}
val wrAccessRegisterCommand = COMMANDWrEn && commandWrIsAccessRegister && (ABSTRACTCSReg.cmderr === 0.U)
val regAccessRegisterCommand = autoexec && commandRegIsAccessRegister && (ABSTRACTCSReg.cmderr === 0.U)
//------------------------
// Variable ROM STATE MACHINE
// -----------------------
when (ctrlStateReg === CtrlState(Waiting)){
when (wrAccessRegisterCommand || regAccessRegisterCommand) {
ctrlStateNxt := CtrlState(CheckGenerate)
}.elsewhen (commandWrIsUnsupported) { // These checks are really on the command type.
errorUnsupported := true.B
}.elsewhen (autoexec && commandRegIsUnsupported) {
errorUnsupported := true.B
}
}.elsewhen (ctrlStateReg === CtrlState(CheckGenerate)){
// We use this state to ensure that the COMMAND has been
// registered by the time that we need to use it, to avoid
// generating it directly from the COMMANDWrData.
// This 'commandRegIsUnsupported' is really just checking the
// AccessRegisterCommand parameters (regno)
when (commandRegIsUnsupported) {
errorUnsupported := true.B
ctrlStateNxt := CtrlState(Waiting)
}.elsewhen (commandRegBadHaltResume){
errorHaltResume := true.B
ctrlStateNxt := CtrlState(Waiting)
}.otherwise {
when(accessRegIsCustom) {
ctrlStateNxt := CtrlState(Custom)
}.otherwise {
ctrlStateNxt := CtrlState(Exec)
goAbstract := true.B
}
}
}.elsewhen (ctrlStateReg === CtrlState(Exec)) {
// We can't just look at 'hartHalted' here, because
// hartHaltedWrEn is overloaded to mean 'got an ebreak'
// which may have happened when we were already halted.
when(goReg === false.B && hartHaltedWrEn && (hartSelFuncs.hartIdToHartSel(hartHaltedId) === selectedHartReg)){
ctrlStateNxt := CtrlState(Waiting)
}
when(hartExceptionWrEn) {
assert(hartExceptionId === 0.U, "Unexpected 'EXCEPTION' hart")//Chisel3 #540, %x, expected %x", hartExceptionId, 0.U)
ctrlStateNxt := CtrlState(Waiting)
errorException := true.B
}
}.elsewhen (ctrlStateReg === CtrlState(Custom)) {
assert(needCustom.B, "Should not be in custom state unless we need it.")
goCustom := true.B
val (custom, customP) = customNode.in.head
when (custom.ready && custom.valid) {
ctrlStateNxt := CtrlState(Waiting)
}
}
when (~io.dmactive || ~dmAuthenticated) {
ctrlStateReg := CtrlState(Waiting)
}.otherwise {
ctrlStateReg := ctrlStateNxt
}
assert ((!io.dmactive || !hartExceptionWrEn || ctrlStateReg === CtrlState(Exec)),
"Unexpected EXCEPTION write: should only get it in Debug Module EXEC state")
}
}
// Wrapper around TL Debug Module Inner and an Async DMI Sink interface.
// Handles the synchronization of dmactive, which is used as a synchronous reset
// inside the Inner block.
// Also is the Sink side of hartsel & resumereq fields of DMCONTROL.
class TLDebugModuleInnerAsync(device: Device, getNComponents: () => Int, beatBytes: Int)(implicit p: Parameters) extends LazyModule{
val cfg = p(DebugModuleKey).get
val dmInner = LazyModule(new TLDebugModuleInner(device, getNComponents, beatBytes))
val dmiXing = LazyModule(new TLAsyncCrossingSink(AsyncQueueParams.singleton(safe=cfg.crossingHasSafeReset)))
val dmiNode = dmiXing.node
val tlNode = dmInner.tlNode
dmInner.dmiNode := dmiXing.node
// Require that there are no registers in TL interface, so that spurious
// processor accesses to the DM don't need to enable the clock. We don't
// require this property of the SBA, because the debugger is responsible for
// raising dmactive (hence enabling the clock) during these transactions.
require(dmInner.tlNode.concurrency == 0)
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
// Clock/reset domains:
// debug_clock / debug_reset = Debug inner domain
// tl_clock / tl_reset = tilelink domain (External: clock / reset)
//
val io = IO(new Bundle {
val debug_clock = Input(Clock())
val debug_reset = Input(Reset())
val tl_clock = Input(Clock())
val tl_reset = Input(Reset())
// These are all asynchronous and come from Outer
/** reset signal for DM */
val dmactive = Input(Bool())
    /** control signals for Inner
*
* generated in Outer
*/
val innerCtrl = Flipped(new AsyncBundle(new DebugInternalBundle(getNComponents()), AsyncQueueParams.singleton(safe=cfg.crossingHasSafeReset)))
// This comes from tlClk domain.
/** debug available status */
val debugUnavail = Input(Vec(getNComponents(), Bool()))
/** debug interruption*/
val hgDebugInt = Output(Vec(getNComponents(), Bool()))
val extTrigger = (p(DebugModuleKey).get.nExtTriggers > 0).option(new DebugExtTriggerIO())
/** vector to indicate which hart is in reset
*
* dm receives it from core and sends it to Inner
*/
val hartIsInReset = Input(Vec(getNComponents(), Bool()))
/** Debug Authentication signals from core */
val auth = p(DebugModuleKey).get.hasAuthentication.option(new DebugAuthenticationIO())
})
val rf_reset = IO(Input(Reset())) // RF transform
childClock := io.debug_clock
childReset := io.debug_reset
override def provideImplicitClockToLazyChildren = true
val dmactive_synced = withClockAndReset(childClock, childReset) {
val dmactive_synced = AsyncResetSynchronizerShiftReg(in=io.dmactive, sync=3, name=Some("dmactiveSync"))
dmInner.module.clock := io.debug_clock
dmInner.module.reset := io.debug_reset
dmInner.module.io.tl_clock := io.tl_clock
dmInner.module.io.tl_reset := io.tl_reset
dmInner.module.io.dmactive := dmactive_synced
dmInner.module.io.innerCtrl <> FromAsyncBundle(io.innerCtrl)
dmInner.module.io.debugUnavail := io.debugUnavail
io.hgDebugInt := dmInner.module.io.hgDebugInt
io.extTrigger.foreach { x => dmInner.module.io.extTrigger.foreach {y => x <> y}}
dmInner.module.io.hartIsInReset := io.hartIsInReset
io.auth.foreach { x => dmInner.module.io.auth.foreach {y => x <> y}}
dmactive_synced
}
}
}
/** Create a version of the TLDebugModule which includes a synchronization interface
* internally for the DMI. This is no longer optional outside of this module
* because the Clock must run when tl_clock isn't running or tl_reset is asserted.
*/
class TLDebugModule(beatBytes: Int)(implicit p: Parameters) extends LazyModule {
val device = new SimpleDevice("debug-controller", Seq("sifive,debug-013","riscv,debug-013")){
override val alwaysExtended = true
override def describe(resources: ResourceBindings): Description = {
val Description(name, mapping) = super.describe(resources)
val attach = Map(
"debug-attach" -> (
(if (p(ExportDebug).apb) Seq(ResourceString("apb")) else Seq()) ++
(if (p(ExportDebug).jtag) Seq(ResourceString("jtag")) else Seq()) ++
(if (p(ExportDebug).cjtag) Seq(ResourceString("cjtag")) else Seq()) ++
(if (p(ExportDebug).dmi) Seq(ResourceString("dmi")) else Seq())))
Description(name, mapping ++ attach)
}
}
val dmOuter : TLDebugModuleOuterAsync = LazyModule(new TLDebugModuleOuterAsync(device)(p))
val dmInner : TLDebugModuleInnerAsync = LazyModule(new TLDebugModuleInnerAsync(device, () => {dmOuter.dmOuter.intnode.edges.out.size}, beatBytes)(p))
val node = dmInner.tlNode
val intnode = dmOuter.intnode
val apbNodeOpt = dmOuter.apbNodeOpt
dmInner.dmiNode := dmOuter.dmiInnerNode
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val nComponents = dmOuter.dmOuter.intnode.edges.out.size
// Clock/reset domains:
// tl_clock / tl_reset = tilelink domain
// debug_clock / debug_reset = Inner debug (synchronous to tl_clock)
// apb_clock / apb_reset = Outer debug with APB
// dmiClock / dmiReset = Outer debug without APB
//
val io = IO(new Bundle {
val debug_clock = Input(Clock())
val debug_reset = Input(Reset())
val tl_clock = Input(Clock())
val tl_reset = Input(Reset())
/** Debug control signals generated in Outer */
val ctrl = new DebugCtrlBundle(nComponents)
    /** Debug Module Interface between DM and DTM
*
* The DTM provides access to one or more Debug Modules (DMs) using DMI
*/
val dmi = (!p(ExportDebug).apb).option(Flipped(new ClockedDMIIO()))
val apb_clock = p(ExportDebug).apb.option(Input(Clock()))
val apb_reset = p(ExportDebug).apb.option(Input(Reset()))
val extTrigger = (p(DebugModuleKey).get.nExtTriggers > 0).option(new DebugExtTriggerIO())
/** vector to indicate which hart is in reset
*
* dm receives it from core and sends it to Inner
*/
val hartIsInReset = Input(Vec(nComponents, Bool()))
/** hart reset request generated by hartreset-logic in Outer */
val hartResetReq = p(DebugModuleKey).get.hasHartResets.option(Output(Vec(nComponents, Bool())))
/** Debug Authentication signals from core */
val auth = p(DebugModuleKey).get.hasAuthentication.option(new DebugAuthenticationIO())
})
childClock := io.tl_clock
childReset := io.tl_reset
override def provideImplicitClockToLazyChildren = true
dmOuter.module.io.dmi.foreach { dmOuterDMI =>
dmOuterDMI <> io.dmi.get.dmi
dmOuter.module.io.dmi_reset := io.dmi.get.dmiReset
dmOuter.module.io.dmi_clock := io.dmi.get.dmiClock
dmOuter.module.rf_reset := io.dmi.get.dmiReset
}
(io.apb_clock zip io.apb_reset) foreach { case (c, r) =>
dmOuter.module.io.dmi_reset := r
dmOuter.module.io.dmi_clock := c
dmOuter.module.rf_reset := r
}
dmInner.module.rf_reset := io.debug_reset
dmInner.module.io.debug_clock := io.debug_clock
dmInner.module.io.debug_reset := io.debug_reset
dmInner.module.io.tl_clock := io.tl_clock
dmInner.module.io.tl_reset := io.tl_reset
dmInner.module.io.innerCtrl <> dmOuter.module.io.innerCtrl
dmInner.module.io.dmactive := dmOuter.module.io.ctrl.dmactive
dmInner.module.io.debugUnavail := io.ctrl.debugUnavail
dmOuter.module.io.hgDebugInt := dmInner.module.io.hgDebugInt
io.ctrl <> dmOuter.module.io.ctrl
io.extTrigger.foreach { x => dmInner.module.io.extTrigger.foreach {y => x <> y}}
dmInner.module.io.hartIsInReset := io.hartIsInReset
io.hartResetReq.foreach { x => dmOuter.module.io.hartResetReq.foreach {y => x := y}}
io.auth.foreach { x => dmOuter.module.io.dmAuthenticated.get := x.dmAuthenticated }
io.auth.foreach { x => dmInner.module.io.auth.foreach {y => x <> y}}
}
}
| module TLDebugModuleInnerAsync( // @[Debug.scala:1871:9]
input [2:0] auto_dmiXing_in_a_mem_0_opcode, // @[LazyModuleImp.scala:107:25]
input [8:0] auto_dmiXing_in_a_mem_0_address, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_dmiXing_in_a_mem_0_data, // @[LazyModuleImp.scala:107:25]
output auto_dmiXing_in_a_ridx, // @[LazyModuleImp.scala:107:25]
input auto_dmiXing_in_a_widx, // @[LazyModuleImp.scala:107:25]
output auto_dmiXing_in_a_safe_ridx_valid, // @[LazyModuleImp.scala:107:25]
input auto_dmiXing_in_a_safe_widx_valid, // @[LazyModuleImp.scala:107:25]
input auto_dmiXing_in_a_safe_source_reset_n, // @[LazyModuleImp.scala:107:25]
output auto_dmiXing_in_a_safe_sink_reset_n, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_dmiXing_in_d_mem_0_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_dmiXing_in_d_mem_0_size, // @[LazyModuleImp.scala:107:25]
output auto_dmiXing_in_d_mem_0_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_dmiXing_in_d_mem_0_data, // @[LazyModuleImp.scala:107:25]
input auto_dmiXing_in_d_ridx, // @[LazyModuleImp.scala:107:25]
output auto_dmiXing_in_d_widx, // @[LazyModuleImp.scala:107:25]
input auto_dmiXing_in_d_safe_ridx_valid, // @[LazyModuleImp.scala:107:25]
output auto_dmiXing_in_d_safe_widx_valid, // @[LazyModuleImp.scala:107:25]
output auto_dmiXing_in_d_safe_source_reset_n, // @[LazyModuleImp.scala:107:25]
input auto_dmiXing_in_d_safe_sink_reset_n, // @[LazyModuleImp.scala:107:25]
input auto_dmInner_sb2tlOpt_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_dmInner_sb2tlOpt_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_dmInner_sb2tlOpt_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_dmInner_sb2tlOpt_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_dmInner_sb2tlOpt_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_dmInner_sb2tlOpt_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_dmInner_sb2tlOpt_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_dmInner_sb2tlOpt_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_dmInner_sb2tlOpt_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_dmInner_sb2tlOpt_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dmInner_sb2tlOpt_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dmInner_sb2tlOpt_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_dmInner_sb2tlOpt_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_dmInner_sb2tlOpt_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_dmInner_sb2tlOpt_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_dmInner_tl_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_dmInner_tl_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_dmInner_tl_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_dmInner_tl_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_dmInner_tl_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [11:0] auto_dmInner_tl_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [11:0] auto_dmInner_tl_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_dmInner_tl_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_dmInner_tl_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_dmInner_tl_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_dmInner_tl_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_dmInner_tl_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_dmInner_tl_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_dmInner_tl_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [11:0] auto_dmInner_tl_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_dmInner_tl_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
input io_debug_clock, // @[Debug.scala:1877:16]
input io_debug_reset, // @[Debug.scala:1877:16]
input io_tl_clock, // @[Debug.scala:1877:16]
input io_tl_reset, // @[Debug.scala:1877:16]
input io_dmactive, // @[Debug.scala:1877:16]
input io_innerCtrl_mem_0_resumereq, // @[Debug.scala:1877:16]
input [9:0] io_innerCtrl_mem_0_hartsel, // @[Debug.scala:1877:16]
input io_innerCtrl_mem_0_ackhavereset, // @[Debug.scala:1877:16]
input io_innerCtrl_mem_0_hrmask_0, // @[Debug.scala:1877:16]
output io_innerCtrl_ridx, // @[Debug.scala:1877:16]
input io_innerCtrl_widx, // @[Debug.scala:1877:16]
output io_innerCtrl_safe_ridx_valid, // @[Debug.scala:1877:16]
input io_innerCtrl_safe_widx_valid, // @[Debug.scala:1877:16]
input io_innerCtrl_safe_source_reset_n, // @[Debug.scala:1877:16]
output io_innerCtrl_safe_sink_reset_n, // @[Debug.scala:1877:16]
output io_hgDebugInt_0, // @[Debug.scala:1877:16]
input io_hartIsInReset_0 // @[Debug.scala:1877:16]
);
wire _dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_valid; // @[AsyncQueue.scala:211:22]
wire _dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_resumereq; // @[AsyncQueue.scala:211:22]
wire [9:0] _dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hartsel; // @[AsyncQueue.scala:211:22]
wire _dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_ackhavereset; // @[AsyncQueue.scala:211:22]
wire _dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hrmask_0; // @[AsyncQueue.scala:211:22]
wire _dmactive_synced_dmactive_synced_dmactiveSync_io_q; // @[ShiftReg.scala:45:23]
wire _dmiXing_auto_out_a_valid; // @[Debug.scala:1858:27]
wire [2:0] _dmiXing_auto_out_a_bits_opcode; // @[Debug.scala:1858:27]
wire [2:0] _dmiXing_auto_out_a_bits_param; // @[Debug.scala:1858:27]
wire [1:0] _dmiXing_auto_out_a_bits_size; // @[Debug.scala:1858:27]
wire _dmiXing_auto_out_a_bits_source; // @[Debug.scala:1858:27]
wire [8:0] _dmiXing_auto_out_a_bits_address; // @[Debug.scala:1858:27]
wire [3:0] _dmiXing_auto_out_a_bits_mask; // @[Debug.scala:1858:27]
wire [31:0] _dmiXing_auto_out_a_bits_data; // @[Debug.scala:1858:27]
wire _dmiXing_auto_out_a_bits_corrupt; // @[Debug.scala:1858:27]
wire _dmiXing_auto_out_d_ready; // @[Debug.scala:1858:27]
wire _dmInner_auto_dmi_in_a_ready; // @[Debug.scala:1857:27]
wire _dmInner_auto_dmi_in_d_valid; // @[Debug.scala:1857:27]
wire [2:0] _dmInner_auto_dmi_in_d_bits_opcode; // @[Debug.scala:1857:27]
wire [1:0] _dmInner_auto_dmi_in_d_bits_size; // @[Debug.scala:1857:27]
wire _dmInner_auto_dmi_in_d_bits_source; // @[Debug.scala:1857:27]
wire [31:0] _dmInner_auto_dmi_in_d_bits_data; // @[Debug.scala:1857:27]
TLDebugModuleInner dmInner ( // @[Debug.scala:1857:27]
.clock (io_debug_clock),
.reset (io_debug_reset),
.auto_sb2tlOpt_out_a_ready (auto_dmInner_sb2tlOpt_out_a_ready),
.auto_sb2tlOpt_out_a_valid (auto_dmInner_sb2tlOpt_out_a_valid),
.auto_sb2tlOpt_out_a_bits_opcode (auto_dmInner_sb2tlOpt_out_a_bits_opcode),
.auto_sb2tlOpt_out_a_bits_size (auto_dmInner_sb2tlOpt_out_a_bits_size),
.auto_sb2tlOpt_out_a_bits_address (auto_dmInner_sb2tlOpt_out_a_bits_address),
.auto_sb2tlOpt_out_a_bits_data (auto_dmInner_sb2tlOpt_out_a_bits_data),
.auto_sb2tlOpt_out_d_ready (auto_dmInner_sb2tlOpt_out_d_ready),
.auto_sb2tlOpt_out_d_valid (auto_dmInner_sb2tlOpt_out_d_valid),
.auto_sb2tlOpt_out_d_bits_opcode (auto_dmInner_sb2tlOpt_out_d_bits_opcode),
.auto_sb2tlOpt_out_d_bits_param (auto_dmInner_sb2tlOpt_out_d_bits_param),
.auto_sb2tlOpt_out_d_bits_size (auto_dmInner_sb2tlOpt_out_d_bits_size),
.auto_sb2tlOpt_out_d_bits_sink (auto_dmInner_sb2tlOpt_out_d_bits_sink),
.auto_sb2tlOpt_out_d_bits_denied (auto_dmInner_sb2tlOpt_out_d_bits_denied),
.auto_sb2tlOpt_out_d_bits_data (auto_dmInner_sb2tlOpt_out_d_bits_data),
.auto_sb2tlOpt_out_d_bits_corrupt (auto_dmInner_sb2tlOpt_out_d_bits_corrupt),
.auto_tl_in_a_ready (auto_dmInner_tl_in_a_ready),
.auto_tl_in_a_valid (auto_dmInner_tl_in_a_valid),
.auto_tl_in_a_bits_opcode (auto_dmInner_tl_in_a_bits_opcode),
.auto_tl_in_a_bits_param (auto_dmInner_tl_in_a_bits_param),
.auto_tl_in_a_bits_size (auto_dmInner_tl_in_a_bits_size),
.auto_tl_in_a_bits_source (auto_dmInner_tl_in_a_bits_source),
.auto_tl_in_a_bits_address (auto_dmInner_tl_in_a_bits_address),
.auto_tl_in_a_bits_mask (auto_dmInner_tl_in_a_bits_mask),
.auto_tl_in_a_bits_data (auto_dmInner_tl_in_a_bits_data),
.auto_tl_in_a_bits_corrupt (auto_dmInner_tl_in_a_bits_corrupt),
.auto_tl_in_d_ready (auto_dmInner_tl_in_d_ready),
.auto_tl_in_d_valid (auto_dmInner_tl_in_d_valid),
.auto_tl_in_d_bits_opcode (auto_dmInner_tl_in_d_bits_opcode),
.auto_tl_in_d_bits_size (auto_dmInner_tl_in_d_bits_size),
.auto_tl_in_d_bits_source (auto_dmInner_tl_in_d_bits_source),
.auto_tl_in_d_bits_data (auto_dmInner_tl_in_d_bits_data),
.auto_dmi_in_a_ready (_dmInner_auto_dmi_in_a_ready),
.auto_dmi_in_a_valid (_dmiXing_auto_out_a_valid), // @[Debug.scala:1858:27]
.auto_dmi_in_a_bits_opcode (_dmiXing_auto_out_a_bits_opcode), // @[Debug.scala:1858:27]
.auto_dmi_in_a_bits_param (_dmiXing_auto_out_a_bits_param), // @[Debug.scala:1858:27]
.auto_dmi_in_a_bits_size (_dmiXing_auto_out_a_bits_size), // @[Debug.scala:1858:27]
.auto_dmi_in_a_bits_source (_dmiXing_auto_out_a_bits_source), // @[Debug.scala:1858:27]
.auto_dmi_in_a_bits_address (_dmiXing_auto_out_a_bits_address), // @[Debug.scala:1858:27]
.auto_dmi_in_a_bits_mask (_dmiXing_auto_out_a_bits_mask), // @[Debug.scala:1858:27]
.auto_dmi_in_a_bits_data (_dmiXing_auto_out_a_bits_data), // @[Debug.scala:1858:27]
.auto_dmi_in_a_bits_corrupt (_dmiXing_auto_out_a_bits_corrupt), // @[Debug.scala:1858:27]
.auto_dmi_in_d_ready (_dmiXing_auto_out_d_ready), // @[Debug.scala:1858:27]
.auto_dmi_in_d_valid (_dmInner_auto_dmi_in_d_valid),
.auto_dmi_in_d_bits_opcode (_dmInner_auto_dmi_in_d_bits_opcode),
.auto_dmi_in_d_bits_size (_dmInner_auto_dmi_in_d_bits_size),
.auto_dmi_in_d_bits_source (_dmInner_auto_dmi_in_d_bits_source),
.auto_dmi_in_d_bits_data (_dmInner_auto_dmi_in_d_bits_data),
.io_dmactive (_dmactive_synced_dmactive_synced_dmactiveSync_io_q), // @[ShiftReg.scala:45:23]
.io_innerCtrl_valid (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_valid), // @[AsyncQueue.scala:211:22]
.io_innerCtrl_bits_resumereq (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_resumereq), // @[AsyncQueue.scala:211:22]
.io_innerCtrl_bits_hartsel (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hartsel), // @[AsyncQueue.scala:211:22]
.io_innerCtrl_bits_ackhavereset (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_ackhavereset), // @[AsyncQueue.scala:211:22]
.io_innerCtrl_bits_hrmask_0 (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hrmask_0), // @[AsyncQueue.scala:211:22]
.io_hgDebugInt_0 (io_hgDebugInt_0),
.io_hartIsInReset_0 (io_hartIsInReset_0),
.io_tl_clock (io_tl_clock),
.io_tl_reset (io_tl_reset)
); // @[Debug.scala:1857:27]
TLAsyncCrossingSink_a9d32s1k1z2u dmiXing ( // @[Debug.scala:1858:27]
.clock (io_debug_clock),
.reset (io_debug_reset),
.auto_in_a_mem_0_opcode (auto_dmiXing_in_a_mem_0_opcode),
.auto_in_a_mem_0_address (auto_dmiXing_in_a_mem_0_address),
.auto_in_a_mem_0_data (auto_dmiXing_in_a_mem_0_data),
.auto_in_a_ridx (auto_dmiXing_in_a_ridx),
.auto_in_a_widx (auto_dmiXing_in_a_widx),
.auto_in_a_safe_ridx_valid (auto_dmiXing_in_a_safe_ridx_valid),
.auto_in_a_safe_widx_valid (auto_dmiXing_in_a_safe_widx_valid),
.auto_in_a_safe_source_reset_n (auto_dmiXing_in_a_safe_source_reset_n),
.auto_in_a_safe_sink_reset_n (auto_dmiXing_in_a_safe_sink_reset_n),
.auto_in_d_mem_0_opcode (auto_dmiXing_in_d_mem_0_opcode),
.auto_in_d_mem_0_size (auto_dmiXing_in_d_mem_0_size),
.auto_in_d_mem_0_source (auto_dmiXing_in_d_mem_0_source),
.auto_in_d_mem_0_data (auto_dmiXing_in_d_mem_0_data),
.auto_in_d_ridx (auto_dmiXing_in_d_ridx),
.auto_in_d_widx (auto_dmiXing_in_d_widx),
.auto_in_d_safe_ridx_valid (auto_dmiXing_in_d_safe_ridx_valid),
.auto_in_d_safe_widx_valid (auto_dmiXing_in_d_safe_widx_valid),
.auto_in_d_safe_source_reset_n (auto_dmiXing_in_d_safe_source_reset_n),
.auto_in_d_safe_sink_reset_n (auto_dmiXing_in_d_safe_sink_reset_n),
.auto_out_a_ready (_dmInner_auto_dmi_in_a_ready), // @[Debug.scala:1857:27]
.auto_out_a_valid (_dmiXing_auto_out_a_valid),
.auto_out_a_bits_opcode (_dmiXing_auto_out_a_bits_opcode),
.auto_out_a_bits_param (_dmiXing_auto_out_a_bits_param),
.auto_out_a_bits_size (_dmiXing_auto_out_a_bits_size),
.auto_out_a_bits_source (_dmiXing_auto_out_a_bits_source),
.auto_out_a_bits_address (_dmiXing_auto_out_a_bits_address),
.auto_out_a_bits_mask (_dmiXing_auto_out_a_bits_mask),
.auto_out_a_bits_data (_dmiXing_auto_out_a_bits_data),
.auto_out_a_bits_corrupt (_dmiXing_auto_out_a_bits_corrupt),
.auto_out_d_ready (_dmiXing_auto_out_d_ready),
.auto_out_d_valid (_dmInner_auto_dmi_in_d_valid), // @[Debug.scala:1857:27]
.auto_out_d_bits_opcode (_dmInner_auto_dmi_in_d_bits_opcode), // @[Debug.scala:1857:27]
.auto_out_d_bits_size (_dmInner_auto_dmi_in_d_bits_size), // @[Debug.scala:1857:27]
.auto_out_d_bits_source (_dmInner_auto_dmi_in_d_bits_source), // @[Debug.scala:1857:27]
.auto_out_d_bits_data (_dmInner_auto_dmi_in_d_bits_data) // @[Debug.scala:1857:27]
); // @[Debug.scala:1858:27]
AsyncResetSynchronizerShiftReg_w1_d3_i0 dmactive_synced_dmactive_synced_dmactiveSync ( // @[ShiftReg.scala:45:23]
.clock (io_debug_clock),
.reset (io_debug_reset),
.io_d (io_dmactive),
.io_q (_dmactive_synced_dmactive_synced_dmactiveSync_io_q)
); // @[ShiftReg.scala:45:23]
AsyncQueueSink_DebugInternalBundle dmactive_synced_dmInner_io_innerCtrl_sink ( // @[AsyncQueue.scala:211:22]
.clock (io_debug_clock),
.reset (io_debug_reset),
.io_deq_valid (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_valid),
.io_deq_bits_resumereq (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_resumereq),
.io_deq_bits_hartsel (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hartsel),
.io_deq_bits_ackhavereset (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_ackhavereset),
.io_deq_bits_hrmask_0 (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hrmask_0),
.io_async_mem_0_resumereq (io_innerCtrl_mem_0_resumereq),
.io_async_mem_0_hartsel (io_innerCtrl_mem_0_hartsel),
.io_async_mem_0_ackhavereset (io_innerCtrl_mem_0_ackhavereset),
.io_async_mem_0_hrmask_0 (io_innerCtrl_mem_0_hrmask_0),
.io_async_ridx (io_innerCtrl_ridx),
.io_async_widx (io_innerCtrl_widx),
.io_async_safe_ridx_valid (io_innerCtrl_safe_ridx_valid),
.io_async_safe_widx_valid (io_innerCtrl_safe_widx_valid),
.io_async_safe_source_reset_n (io_innerCtrl_safe_source_reset_n),
.io_async_safe_sink_reset_n (io_innerCtrl_safe_sink_reset_n)
); // @[AsyncQueue.scala:211:22]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
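// Usage sketch (added, not in the original source): ShiftRegInit(sig, n = 2, init = false.B,
// name = Some("pipe")) expands to two RegNext stages reset to false.B; the register suggested
// as "pipe_1" samples sig and "pipe_0" drives the returned value.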
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
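// Usage sketch (added, not in the original source): the debug module above calls
// AsyncResetSynchronizerShiftReg(in = io.dmactive, sync = 3, name = Some("dmactiveSync")),
// which elaborates an AsyncResetSynchronizerShiftReg_w1_d3_i0 wrapper holding one 3-deep
// AsyncResetSynchronizerPrimitiveShiftReg chain per input bit.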
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
| module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0( // @[SynchronizerReg.scala:68:19]
input clock, // @[SynchronizerReg.scala:68:19]
input reset, // @[SynchronizerReg.scala:68:19]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:68:19]
wire _sync_2_T = io_d_0; // @[SynchronizerReg.scala:54:22, :68:19]
wire io_q_0; // @[SynchronizerReg.scala:68:19]
reg sync_0; // @[SynchronizerReg.scala:51:87]
assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
reg sync_1; // @[SynchronizerReg.scala:51:87]
reg sync_2; // @[SynchronizerReg.scala:51:87]
always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
if (reset) begin // @[SynchronizerReg.scala:68:19]
sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
end
else begin // @[SynchronizerReg.scala:68:19]
sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
sync_2 <= _sync_2_T; // @[SynchronizerReg.scala:51:87, :54:22]
end
  end // always @(posedge, posedge)
  assign io_q = io_q_0; // @[SynchronizerReg.scala:68:19]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File MulRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (ported from Verilog to
Chisel by Andrew Waterman).
Copyright 2019, 2020 The Regents of the University of California. All rights
reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulFullRawFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule
{
val io = IO(new Bundle {
val a = Input(new RawFloat(expWidth, sigWidth))
val b = Input(new RawFloat(expWidth, sigWidth))
val invalidExc = Output(Bool())
val rawOut = Output(new RawFloat(expWidth, sigWidth*2 - 1))
})
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
val notSigNaN_invalidExc = (io.a.isInf && io.b.isZero) || (io.a.isZero && io.b.isInf)
val notNaN_isInfOut = io.a.isInf || io.b.isInf
val notNaN_isZeroOut = io.a.isZero || io.b.isZero
val notNaN_signOut = io.a.sign ^ io.b.sign
val common_sExpOut = io.a.sExp + io.b.sExp - (1<<expWidth).S
val common_sigOut = (io.a.sig * io.b.sig)(sigWidth*2 - 1, 0)
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
io.invalidExc := isSigNaNRawFloat(io.a) || isSigNaNRawFloat(io.b) || notSigNaN_invalidExc
io.rawOut.isInf := notNaN_isInfOut
io.rawOut.isZero := notNaN_isZeroOut
io.rawOut.sExp := common_sExpOut
io.rawOut.isNaN := io.a.isNaN || io.b.isNaN
io.rawOut.sign := notNaN_signOut
io.rawOut.sig := common_sigOut
}
class MulRawFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule
{
val io = IO(new Bundle {
val a = Input(new RawFloat(expWidth, sigWidth))
val b = Input(new RawFloat(expWidth, sigWidth))
val invalidExc = Output(Bool())
val rawOut = Output(new RawFloat(expWidth, sigWidth + 2))
})
val mulFullRaw = Module(new MulFullRawFN(expWidth, sigWidth))
mulFullRaw.io.a := io.a
mulFullRaw.io.b := io.b
io.invalidExc := mulFullRaw.io.invalidExc
io.rawOut := mulFullRaw.io.rawOut
io.rawOut.sig := {
val sig = mulFullRaw.io.rawOut.sig
Cat(sig >> (sigWidth - 2), sig(sigWidth - 3, 0).orR)
}
}
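// Note (added for clarity, not in the original source): the full 2*sigWidth-bit product is
// narrowed here by keeping its top sigWidth+2 bits and OR-reducing the remaining low bits
// into a single sticky bit for the rounding stage; e.g. for sigWidth = 24 the kept slice is
// bits [47:22] plus the OR of bits [21:0].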
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulRecFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule
{
val io = IO(new Bundle {
val a = Input(UInt((expWidth + sigWidth + 1).W))
val b = Input(UInt((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(Bool())
val out = Output(UInt((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(UInt(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val mulRawFN = Module(new MulRawFN(expWidth, sigWidth))
mulRawFN.io.a := rawFloatFromRecFN(expWidth, sigWidth, io.a)
mulRawFN.io.b := rawFloatFromRecFN(expWidth, sigWidth, io.b)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundRawFNToRecFN =
Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0))
roundRawFNToRecFN.io.invalidExc := mulRawFN.io.invalidExc
roundRawFNToRecFN.io.infiniteExc := false.B
roundRawFNToRecFN.io.in := mulRawFN.io.rawOut
roundRawFNToRecFN.io.roundingMode := io.roundingMode
roundRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundRawFNToRecFN.io.out
io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
}
| module MulRawFN_54( // @[MulRecFN.scala:75:7]
input io_a_isNaN, // @[MulRecFN.scala:77:16]
input io_a_isInf, // @[MulRecFN.scala:77:16]
input io_a_isZero, // @[MulRecFN.scala:77:16]
input io_a_sign, // @[MulRecFN.scala:77:16]
input [9:0] io_a_sExp, // @[MulRecFN.scala:77:16]
input [24:0] io_a_sig, // @[MulRecFN.scala:77:16]
input io_b_isNaN, // @[MulRecFN.scala:77:16]
input io_b_isInf, // @[MulRecFN.scala:77:16]
input io_b_isZero, // @[MulRecFN.scala:77:16]
input io_b_sign, // @[MulRecFN.scala:77:16]
input [9:0] io_b_sExp, // @[MulRecFN.scala:77:16]
input [24:0] io_b_sig, // @[MulRecFN.scala:77:16]
output io_invalidExc, // @[MulRecFN.scala:77:16]
output io_rawOut_isNaN, // @[MulRecFN.scala:77:16]
output io_rawOut_isInf, // @[MulRecFN.scala:77:16]
output io_rawOut_isZero, // @[MulRecFN.scala:77:16]
output io_rawOut_sign, // @[MulRecFN.scala:77:16]
output [9:0] io_rawOut_sExp, // @[MulRecFN.scala:77:16]
output [26:0] io_rawOut_sig // @[MulRecFN.scala:77:16]
);
wire [47:0] _mulFullRaw_io_rawOut_sig; // @[MulRecFN.scala:84:28]
wire io_a_isNaN_0 = io_a_isNaN; // @[MulRecFN.scala:75:7]
wire io_a_isInf_0 = io_a_isInf; // @[MulRecFN.scala:75:7]
wire io_a_isZero_0 = io_a_isZero; // @[MulRecFN.scala:75:7]
wire io_a_sign_0 = io_a_sign; // @[MulRecFN.scala:75:7]
wire [9:0] io_a_sExp_0 = io_a_sExp; // @[MulRecFN.scala:75:7]
wire [24:0] io_a_sig_0 = io_a_sig; // @[MulRecFN.scala:75:7]
wire io_b_isNaN_0 = io_b_isNaN; // @[MulRecFN.scala:75:7]
wire io_b_isInf_0 = io_b_isInf; // @[MulRecFN.scala:75:7]
wire io_b_isZero_0 = io_b_isZero; // @[MulRecFN.scala:75:7]
wire io_b_sign_0 = io_b_sign; // @[MulRecFN.scala:75:7]
wire [9:0] io_b_sExp_0 = io_b_sExp; // @[MulRecFN.scala:75:7]
wire [24:0] io_b_sig_0 = io_b_sig; // @[MulRecFN.scala:75:7]
wire [26:0] _io_rawOut_sig_T_3; // @[MulRecFN.scala:93:10]
wire io_rawOut_isNaN_0; // @[MulRecFN.scala:75:7]
wire io_rawOut_isInf_0; // @[MulRecFN.scala:75:7]
wire io_rawOut_isZero_0; // @[MulRecFN.scala:75:7]
wire io_rawOut_sign_0; // @[MulRecFN.scala:75:7]
wire [9:0] io_rawOut_sExp_0; // @[MulRecFN.scala:75:7]
wire [26:0] io_rawOut_sig_0; // @[MulRecFN.scala:75:7]
wire io_invalidExc_0; // @[MulRecFN.scala:75:7]
wire [25:0] _io_rawOut_sig_T = _mulFullRaw_io_rawOut_sig[47:22]; // @[MulRecFN.scala:84:28, :93:15]
wire [21:0] _io_rawOut_sig_T_1 = _mulFullRaw_io_rawOut_sig[21:0]; // @[MulRecFN.scala:84:28, :93:37]
wire _io_rawOut_sig_T_2 = |_io_rawOut_sig_T_1; // @[MulRecFN.scala:93:{37,55}]
assign _io_rawOut_sig_T_3 = {_io_rawOut_sig_T, _io_rawOut_sig_T_2}; // @[MulRecFN.scala:93:{10,15,55}]
assign io_rawOut_sig_0 = _io_rawOut_sig_T_3; // @[MulRecFN.scala:75:7, :93:10]
MulFullRawFN_54 mulFullRaw ( // @[MulRecFN.scala:84:28]
.io_a_isNaN (io_a_isNaN_0), // @[MulRecFN.scala:75:7]
.io_a_isInf (io_a_isInf_0), // @[MulRecFN.scala:75:7]
.io_a_isZero (io_a_isZero_0), // @[MulRecFN.scala:75:7]
.io_a_sign (io_a_sign_0), // @[MulRecFN.scala:75:7]
.io_a_sExp (io_a_sExp_0), // @[MulRecFN.scala:75:7]
.io_a_sig (io_a_sig_0), // @[MulRecFN.scala:75:7]
.io_b_isNaN (io_b_isNaN_0), // @[MulRecFN.scala:75:7]
.io_b_isInf (io_b_isInf_0), // @[MulRecFN.scala:75:7]
.io_b_isZero (io_b_isZero_0), // @[MulRecFN.scala:75:7]
.io_b_sign (io_b_sign_0), // @[MulRecFN.scala:75:7]
.io_b_sExp (io_b_sExp_0), // @[MulRecFN.scala:75:7]
.io_b_sig (io_b_sig_0), // @[MulRecFN.scala:75:7]
.io_invalidExc (io_invalidExc_0),
.io_rawOut_isNaN (io_rawOut_isNaN_0),
.io_rawOut_isInf (io_rawOut_isInf_0),
.io_rawOut_isZero (io_rawOut_isZero_0),
.io_rawOut_sign (io_rawOut_sign_0),
.io_rawOut_sExp (io_rawOut_sExp_0),
.io_rawOut_sig (_mulFullRaw_io_rawOut_sig)
); // @[MulRecFN.scala:84:28]
assign io_invalidExc = io_invalidExc_0; // @[MulRecFN.scala:75:7]
assign io_rawOut_isNaN = io_rawOut_isNaN_0; // @[MulRecFN.scala:75:7]
assign io_rawOut_isInf = io_rawOut_isInf_0; // @[MulRecFN.scala:75:7]
assign io_rawOut_isZero = io_rawOut_isZero_0; // @[MulRecFN.scala:75:7]
assign io_rawOut_sign = io_rawOut_sign_0; // @[MulRecFN.scala:75:7]
assign io_rawOut_sExp = io_rawOut_sExp_0; // @[MulRecFN.scala:75:7]
assign io_rawOut_sig = io_rawOut_sig_0; // @[MulRecFN.scala:75:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ClockGroup.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.prci
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.resources.FixedClockResource
case class ClockGroupingNode(groupName: String)(implicit valName: ValName)
extends MixedNexusNode(ClockGroupImp, ClockImp)(
dFn = { _ => ClockSourceParameters() },
uFn = { seq => ClockGroupSinkParameters(name = groupName, members = seq) })
{
override def circuitIdentity = outputs.size == 1
}
class ClockGroup(groupName: String)(implicit p: Parameters) extends LazyModule
{
val node = ClockGroupingNode(groupName)
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val (in, _) = node.in(0)
val (out, _) = node.out.unzip
require (node.in.size == 1)
require (in.member.size == out.size)
(in.member.data zip out) foreach { case (i, o) => o := i }
}
}
object ClockGroup
{
def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new ClockGroup(valName.name)).node
}
case class ClockGroupAggregateNode(groupName: String)(implicit valName: ValName)
extends NexusNode(ClockGroupImp)(
dFn = { _ => ClockGroupSourceParameters() },
uFn = { seq => ClockGroupSinkParameters(name = groupName, members = seq.flatMap(_.members))})
{
override def circuitIdentity = outputs.size == 1
}
class ClockGroupAggregator(groupName: String)(implicit p: Parameters) extends LazyModule
{
val node = ClockGroupAggregateNode(groupName)
override lazy val desiredName = s"ClockGroupAggregator_$groupName"
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val (in, _) = node.in.unzip
val (out, _) = node.out.unzip
val outputs = out.flatMap(_.member.data)
require (node.in.size == 1, s"Aggregator for groupName: ${groupName} had ${node.in.size} inward edges instead of 1")
require (in.head.member.size == outputs.size)
in.head.member.data.zip(outputs).foreach { case (i, o) => o := i }
}
}
object ClockGroupAggregator
{
def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new ClockGroupAggregator(valName.name)).node
}
class SimpleClockGroupSource(numSources: Int = 1)(implicit p: Parameters) extends LazyModule
{
val node = ClockGroupSourceNode(List.fill(numSources) { ClockGroupSourceParameters() })
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
val (out, _) = node.out.unzip
out.map { out: ClockGroupBundle =>
out.member.data.foreach { o =>
o.clock := clock; o.reset := reset }
}
}
}
object SimpleClockGroupSource
{
def apply(num: Int = 1)(implicit p: Parameters, valName: ValName) = LazyModule(new SimpleClockGroupSource(num)).node
}
case class FixedClockBroadcastNode(fixedClockOpt: Option[ClockParameters])(implicit valName: ValName)
extends NexusNode(ClockImp)(
dFn = { seq => fixedClockOpt.map(_ => ClockSourceParameters(give = fixedClockOpt)).orElse(seq.headOption).getOrElse(ClockSourceParameters()) },
uFn = { seq => fixedClockOpt.map(_ => ClockSinkParameters(take = fixedClockOpt)).orElse(seq.headOption).getOrElse(ClockSinkParameters()) },
inputRequiresOutput = false) {
def fixedClockResources(name: String, prefix: String = "soc/"): Seq[Option[FixedClockResource]] = Seq(fixedClockOpt.map(t => new FixedClockResource(name, t.freqMHz, prefix)))
}
class FixedClockBroadcast(fixedClockOpt: Option[ClockParameters])(implicit p: Parameters) extends LazyModule
{
val node = new FixedClockBroadcastNode(fixedClockOpt) {
override def circuitIdentity = outputs.size == 1
}
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val (in, _) = node.in(0)
val (out, _) = node.out.unzip
override def desiredName = s"FixedClockBroadcast_${out.size}"
require (node.in.size == 1, "FixedClockBroadcast can only broadcast a single clock")
out.foreach { _ := in }
}
}
object FixedClockBroadcast
{
def apply(fixedClockOpt: Option[ClockParameters] = None)(implicit p: Parameters, valName: ValName) = LazyModule(new FixedClockBroadcast(fixedClockOpt)).node
}
case class PRCIClockGroupNode()(implicit valName: ValName)
extends NexusNode(ClockGroupImp)(
dFn = { _ => ClockGroupSourceParameters() },
uFn = { _ => ClockGroupSinkParameters("prci", Nil) },
outputRequiresInput = false)
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
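/** Illustrative sketch (an assumption, not part of the original source): a [[LazyRawModuleImp]]
 * that forwards an externally supplied clock/reset pair to childClock/childReset, so that
 * lazily instantiated children (including anonymous Monitors) are clocked as recommended above.
 * The class name ExampleClockedIsland and its IO names are hypothetical.
 */
class ExampleClockedIsland(implicit p: Parameters) extends LazyModule {
  lazy val module = new Impl
  class Impl extends LazyRawModuleImp(this) {
    // Opt in to wrapping child instantiation in withClockAndReset(childClock, childReset).
    override def provideImplicitClockToLazyChildren: Boolean = true
    val io_clock = IO(Input(Clock()))
    val io_reset = IO(Input(Reset()))
    // Last connection wins: these override the disabled defaults driven by LazyRawModuleImp.
    childClock := io_clock
    childReset := io_reset
  }
}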
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
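// Illustrative note (an assumption, not from the original source): monitors can be disabled
// globally by overriding this field in a CDE Config fragment, e.g.
//   class WithoutMonitors extends org.chipsalliance.cde.config.Config((site, here, up) => {
//     case MonitorsEnabled => false
//   })
// The fragment name WithoutMonitors is hypothetical.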
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
 * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package; all nodes are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
 *   Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
 *   Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
 *   the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
 *   Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
 *   It should extend [[chisel3.Data]], which represents the real hardware.
* @tparam DO
 *   Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
 *   describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
 *   Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
 *   Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
 *   interface. It should extend [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
 *   - line `─`: source is processed by a function and the result is passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
  /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
   * connections which need to be resolved in some way to determine how many actual edges they correspond to. We also
   * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
   * edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
      // Resolving the stars is delegated to the node subclass, which implements the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
   * If you need access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
    // TODO: Unconnected forwarded diplomatic signals are set to DontCare for compatibility.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
    // TODO: Unconnected forwarded diplomatic signals are set to DontCare for compatibility.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
   * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
   * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
| module FixedClockBroadcast_5( // @[ClockGroup.scala:104:9]
input auto_anon_in_clock, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_reset, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_4_clock, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_4_reset, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_3_clock, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_3_reset, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_2_clock, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_2_reset, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_1_clock, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_1_reset, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_0_clock, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_0_reset // @[LazyModuleImp.scala:107:25]
);
wire auto_anon_in_clock_0 = auto_anon_in_clock; // @[ClockGroup.scala:104:9]
wire auto_anon_in_reset_0 = auto_anon_in_reset; // @[ClockGroup.scala:104:9]
wire childClock = 1'h0; // @[LazyModuleImp.scala:155:31]
wire childReset = 1'h0; // @[LazyModuleImp.scala:158:31]
wire _childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire anonIn_clock = auto_anon_in_clock_0; // @[ClockGroup.scala:104:9]
wire anonIn_reset = auto_anon_in_reset_0; // @[ClockGroup.scala:104:9]
wire x1_anonOut_3_clock; // @[MixedNode.scala:542:17]
wire x1_anonOut_3_reset; // @[MixedNode.scala:542:17]
wire x1_anonOut_2_clock; // @[MixedNode.scala:542:17]
wire x1_anonOut_2_reset; // @[MixedNode.scala:542:17]
wire x1_anonOut_1_clock; // @[MixedNode.scala:542:17]
wire x1_anonOut_1_reset; // @[MixedNode.scala:542:17]
wire x1_anonOut_clock; // @[MixedNode.scala:542:17]
wire x1_anonOut_reset; // @[MixedNode.scala:542:17]
wire anonOut_clock; // @[MixedNode.scala:542:17]
wire anonOut_reset; // @[MixedNode.scala:542:17]
wire auto_anon_out_4_clock_0; // @[ClockGroup.scala:104:9]
wire auto_anon_out_4_reset_0; // @[ClockGroup.scala:104:9]
wire auto_anon_out_3_clock_0; // @[ClockGroup.scala:104:9]
wire auto_anon_out_3_reset_0; // @[ClockGroup.scala:104:9]
wire auto_anon_out_2_clock_0; // @[ClockGroup.scala:104:9]
wire auto_anon_out_2_reset_0; // @[ClockGroup.scala:104:9]
wire auto_anon_out_1_clock_0; // @[ClockGroup.scala:104:9]
wire auto_anon_out_1_reset_0; // @[ClockGroup.scala:104:9]
wire auto_anon_out_0_clock_0; // @[ClockGroup.scala:104:9]
wire auto_anon_out_0_reset_0; // @[ClockGroup.scala:104:9]
assign anonOut_clock = anonIn_clock; // @[MixedNode.scala:542:17, :551:17]
assign x1_anonOut_clock = anonIn_clock; // @[MixedNode.scala:542:17, :551:17]
assign x1_anonOut_1_clock = anonIn_clock; // @[MixedNode.scala:542:17, :551:17]
assign x1_anonOut_2_clock = anonIn_clock; // @[MixedNode.scala:542:17, :551:17]
assign x1_anonOut_3_clock = anonIn_clock; // @[MixedNode.scala:542:17, :551:17]
assign anonOut_reset = anonIn_reset; // @[MixedNode.scala:542:17, :551:17]
assign x1_anonOut_reset = anonIn_reset; // @[MixedNode.scala:542:17, :551:17]
assign x1_anonOut_1_reset = anonIn_reset; // @[MixedNode.scala:542:17, :551:17]
assign x1_anonOut_2_reset = anonIn_reset; // @[MixedNode.scala:542:17, :551:17]
assign x1_anonOut_3_reset = anonIn_reset; // @[MixedNode.scala:542:17, :551:17]
assign auto_anon_out_0_clock_0 = anonOut_clock; // @[ClockGroup.scala:104:9]
assign auto_anon_out_0_reset_0 = anonOut_reset; // @[ClockGroup.scala:104:9]
assign auto_anon_out_1_clock_0 = x1_anonOut_clock; // @[ClockGroup.scala:104:9]
assign auto_anon_out_1_reset_0 = x1_anonOut_reset; // @[ClockGroup.scala:104:9]
assign auto_anon_out_2_clock_0 = x1_anonOut_1_clock; // @[ClockGroup.scala:104:9]
assign auto_anon_out_2_reset_0 = x1_anonOut_1_reset; // @[ClockGroup.scala:104:9]
assign auto_anon_out_3_clock_0 = x1_anonOut_2_clock; // @[ClockGroup.scala:104:9]
assign auto_anon_out_3_reset_0 = x1_anonOut_2_reset; // @[ClockGroup.scala:104:9]
assign auto_anon_out_4_clock_0 = x1_anonOut_3_clock; // @[ClockGroup.scala:104:9]
assign auto_anon_out_4_reset_0 = x1_anonOut_3_reset; // @[ClockGroup.scala:104:9]
assign auto_anon_out_4_clock = auto_anon_out_4_clock_0; // @[ClockGroup.scala:104:9]
assign auto_anon_out_4_reset = auto_anon_out_4_reset_0; // @[ClockGroup.scala:104:9]
assign auto_anon_out_3_clock = auto_anon_out_3_clock_0; // @[ClockGroup.scala:104:9]
assign auto_anon_out_3_reset = auto_anon_out_3_reset_0; // @[ClockGroup.scala:104:9]
assign auto_anon_out_2_clock = auto_anon_out_2_clock_0; // @[ClockGroup.scala:104:9]
assign auto_anon_out_2_reset = auto_anon_out_2_reset_0; // @[ClockGroup.scala:104:9]
assign auto_anon_out_1_clock = auto_anon_out_1_clock_0; // @[ClockGroup.scala:104:9]
assign auto_anon_out_1_reset = auto_anon_out_1_reset_0; // @[ClockGroup.scala:104:9]
assign auto_anon_out_0_clock = auto_anon_out_0_clock_0; // @[ClockGroup.scala:104:9]
assign auto_anon_out_0_reset = auto_anon_out_0_reset_0; // @[ClockGroup.scala:104:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File UnsafeAXI4ToTL.scala:
package ara
import chisel3._
import chisel3.util._
import freechips.rocketchip.amba._
import freechips.rocketchip.amba.axi4._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
class ReorderData(val dataWidth: Int, val respWidth: Int, val userFields: Seq[BundleFieldBase]) extends Bundle {
val data = UInt(dataWidth.W)
val resp = UInt(respWidth.W)
val last = Bool()
val user = BundleMap(userFields)
}
/** Parameters for [[BaseReservableListBuffer]] and all child classes.
*
* @param numEntries Total number of elements that can be stored in the 'data' RAM
* @param numLists Maximum number of linked lists
* @param numBeats Maximum number of beats per entry
*/
case class ReservableListBufferParameters(numEntries: Int, numLists: Int, numBeats: Int) {
// Avoid zero-width wires when we call 'log2Ceil'
val entryBits = if (numEntries == 1) 1 else log2Ceil(numEntries)
val listBits = if (numLists == 1) 1 else log2Ceil(numLists)
val beatBits = if (numBeats == 1) 1 else log2Ceil(numBeats)
}
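// Worked example (illustrative, not from the original source): with numEntries = 4,
// numLists = 16 and numBeats = 8, the derived widths are entryBits = 2, listBits = 4 and
// beatBits = 3. The guards above keep each width at least 1 bit (e.g. numEntries = 1 still
// yields entryBits = 1), so downstream UInt fields never become zero-width.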
case class UnsafeAXI4ToTLNode(numTlTxns: Int, wcorrupt: Boolean)(implicit valName: ValName)
extends MixedAdapterNode(AXI4Imp, TLImp)(
dFn = { case mp =>
TLMasterPortParameters.v2(
masters = mp.masters.zipWithIndex.map { case (m, i) =>
// Support 'numTlTxns' read requests and 'numTlTxns' write requests at once.
val numSourceIds = numTlTxns * 2
TLMasterParameters.v2(
name = m.name,
sourceId = IdRange(i * numSourceIds, (i + 1) * numSourceIds),
nodePath = m.nodePath
)
},
echoFields = mp.echoFields,
requestFields = AMBAProtField() +: mp.requestFields,
responseKeys = mp.responseKeys
)
},
uFn = { mp =>
AXI4SlavePortParameters(
slaves = mp.managers.map { m =>
val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits))
AXI4SlaveParameters(
address = m.address,
resources = m.resources,
regionType = m.regionType,
executable = m.executable,
nodePath = m.nodePath,
supportsWrite = m.supportsPutPartial.intersect(maxXfer),
supportsRead = m.supportsGet.intersect(maxXfer),
interleavedId = Some(0) // TL2 never interleaves D beats
)
},
beatBytes = mp.beatBytes,
minLatency = mp.minLatency,
responseFields = mp.responseFields,
requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt)
)
}
)
class UnsafeAXI4ToTL(numTlTxns: Int, wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule {
require(numTlTxns >= 1)
require(isPow2(numTlTxns), s"Number of TileLink transactions ($numTlTxns) must be a power of 2")
val node = UnsafeAXI4ToTLNode(numTlTxns, wcorrupt)
lazy val module = new LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
edgeIn.master.masters.foreach { m =>
require(m.aligned, "AXI4ToTL requires aligned requests")
}
val numIds = edgeIn.master.endId
val beatBytes = edgeOut.slave.beatBytes
val maxTransfer = edgeOut.slave.maxTransfer
val maxBeats = maxTransfer / beatBytes
// Look for an Error device to redirect bad requests
val errorDevs = edgeOut.slave.managers.filter(_.nodePath.last.lazyModule.className == "TLError")
require(!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. One must be instantiated.")
val errorDev = errorDevs.maxBy(_.maxTransfer)
val errorDevAddr = errorDev.address.head.base
require(
errorDev.supportsPutPartial.contains(maxTransfer),
s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support $maxTransfer"
)
require(
errorDev.supportsGet.contains(maxTransfer),
s"Error device supports ${errorDev.supportsGet} Get but must support $maxTransfer"
)
// All of the read-response reordering logic.
val listBufData = new ReorderData(beatBytes * 8, edgeIn.bundle.respBits, out.d.bits.user.fields)
val listBufParams = ReservableListBufferParameters(numTlTxns, numIds, maxBeats)
val listBuffer = if (numTlTxns > 1) {
Module(new ReservableListBuffer(listBufData, listBufParams))
} else {
Module(new PassthroughListBuffer(listBufData, listBufParams))
}
// To differentiate between read and write transaction IDs, we will set the MSB of the TileLink 'source' field to
// 0 for read requests and 1 for write requests.
val isReadSourceBit = 0.U(1.W)
val isWriteSourceBit = 1.U(1.W)
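      // For example (illustrative): with numTlTxns = 4, TileLink 'source' values 0-3 (MSB = 0)
      // carry read transactions and 4-7 (MSB = 1) carry write transactions, matching the
      // 'numTlTxns * 2' source IDs advertised per master in the node parameters above.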
/* Read request logic */
val rOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val rBytes1 = in.ar.bits.bytes1()
val rSize = OH1ToUInt(rBytes1)
val rOk = edgeOut.slave.supportsGetSafe(in.ar.bits.addr, rSize)
val rId = if (numTlTxns > 1) {
Cat(isReadSourceBit, listBuffer.ioReservedIndex)
} else {
isReadSourceBit
}
val rAddr = Mux(rOk, in.ar.bits.addr, errorDevAddr.U | in.ar.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Indicates if there are still valid TileLink source IDs left to use.
val canIssueR = listBuffer.ioReserve.ready
listBuffer.ioReserve.bits := in.ar.bits.id
listBuffer.ioReserve.valid := in.ar.valid && rOut.ready
in.ar.ready := rOut.ready && canIssueR
rOut.valid := in.ar.valid && canIssueR
rOut.bits :<= edgeOut.Get(rId, rAddr, rSize)._2
rOut.bits.user :<= in.ar.bits.user
rOut.bits.user.lift(AMBAProt).foreach { rProt =>
rProt.privileged := in.ar.bits.prot(0)
rProt.secure := !in.ar.bits.prot(1)
rProt.fetch := in.ar.bits.prot(2)
rProt.bufferable := in.ar.bits.cache(0)
rProt.modifiable := in.ar.bits.cache(1)
rProt.readalloc := in.ar.bits.cache(2)
rProt.writealloc := in.ar.bits.cache(3)
}
/* Write request logic */
// Strip off the MSB, which identifies the transaction as read vs write.
val strippedResponseSourceId = if (numTlTxns > 1) {
out.d.bits.source((out.d.bits.source).getWidth - 2, 0)
} else {
// When there's only 1 TileLink transaction allowed for read/write, then this field is always 0.
0.U(1.W)
}
// Track when a write request burst is in progress.
val writeBurstBusy = RegInit(false.B)
when(in.w.fire) {
writeBurstBusy := !in.w.bits.last
}
val usedWriteIds = RegInit(0.U(numTlTxns.W))
val canIssueW = !usedWriteIds.andR
val usedWriteIdsSet = WireDefault(0.U(numTlTxns.W))
val usedWriteIdsClr = WireDefault(0.U(numTlTxns.W))
usedWriteIds := (usedWriteIds & ~usedWriteIdsClr) | usedWriteIdsSet
// Since write responses can show up in the middle of a write burst, we need to ensure the write burst ID doesn't
// change mid-burst.
val freeWriteIdOHRaw = Wire(UInt(numTlTxns.W))
val freeWriteIdOH = freeWriteIdOHRaw holdUnless !writeBurstBusy
val freeWriteIdIndex = OHToUInt(freeWriteIdOH)
freeWriteIdOHRaw := ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds
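      // Illustrative scenario (an assumption): if a B response frees a lower write ID while a
      // multi-beat W burst is still in flight, 'freeWriteIdOHRaw' would switch to that lower ID
      // mid-burst; the 'holdUnless !writeBurstBusy' above keeps 'freeWriteIdOH' (and therefore
      // the TileLink source of the in-progress Put) stable until the burst's last beat.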
val wOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val wBytes1 = in.aw.bits.bytes1()
val wSize = OH1ToUInt(wBytes1)
val wOk = edgeOut.slave.supportsPutPartialSafe(in.aw.bits.addr, wSize)
val wId = if (numTlTxns > 1) {
Cat(isWriteSourceBit, freeWriteIdIndex)
} else {
isWriteSourceBit
}
val wAddr = Mux(wOk, in.aw.bits.addr, errorDevAddr.U | in.aw.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Here, we're taking advantage of the Irrevocable behavior of AXI4 (once 'valid' is asserted it must remain
// asserted until the handshake occurs). We will only accept W-channel beats when we have a valid AW beat, but
// the AW-channel beat won't fire until the final W-channel beat fires. So, we have stable address/size/strb
// bits during a W-channel burst.
in.aw.ready := wOut.ready && in.w.valid && in.w.bits.last && canIssueW
in.w.ready := wOut.ready && in.aw.valid && canIssueW
wOut.valid := in.aw.valid && in.w.valid && canIssueW
wOut.bits :<= edgeOut.Put(wId, wAddr, wSize, in.w.bits.data, in.w.bits.strb)._2
in.w.bits.user.lift(AMBACorrupt).foreach { wOut.bits.corrupt := _ }
wOut.bits.user :<= in.aw.bits.user
wOut.bits.user.lift(AMBAProt).foreach { wProt =>
wProt.privileged := in.aw.bits.prot(0)
wProt.secure := !in.aw.bits.prot(1)
wProt.fetch := in.aw.bits.prot(2)
wProt.bufferable := in.aw.bits.cache(0)
wProt.modifiable := in.aw.bits.cache(1)
wProt.readalloc := in.aw.bits.cache(2)
wProt.writealloc := in.aw.bits.cache(3)
}
// Merge the AXI4 read/write requests into the TL-A channel.
TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, rOut), (in.aw.bits.len, wOut))
/* Read/write response logic */
val okB = Wire(Irrevocable(new AXI4BundleB(edgeIn.bundle)))
val okR = Wire(Irrevocable(new AXI4BundleR(edgeIn.bundle)))
val dResp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY)
val dHasData = edgeOut.hasData(out.d.bits)
val (_dFirst, dLast, _dDone, dCount) = edgeOut.count(out.d)
val dNumBeats1 = edgeOut.numBeats1(out.d.bits)
// Handle cases where writeack arrives before write is done
val writeEarlyAck = (UIntToOH(strippedResponseSourceId) & usedWriteIds) === 0.U
out.d.ready := Mux(dHasData, listBuffer.ioResponse.ready, okB.ready && !writeEarlyAck)
listBuffer.ioDataOut.ready := okR.ready
okR.valid := listBuffer.ioDataOut.valid
okB.valid := out.d.valid && !dHasData && !writeEarlyAck
listBuffer.ioResponse.valid := out.d.valid && dHasData
listBuffer.ioResponse.bits.index := strippedResponseSourceId
listBuffer.ioResponse.bits.data.data := out.d.bits.data
listBuffer.ioResponse.bits.data.resp := dResp
listBuffer.ioResponse.bits.data.last := dLast
listBuffer.ioResponse.bits.data.user :<= out.d.bits.user
listBuffer.ioResponse.bits.count := dCount
listBuffer.ioResponse.bits.numBeats1 := dNumBeats1
okR.bits.id := listBuffer.ioDataOut.bits.listIndex
okR.bits.data := listBuffer.ioDataOut.bits.payload.data
okR.bits.resp := listBuffer.ioDataOut.bits.payload.resp
okR.bits.last := listBuffer.ioDataOut.bits.payload.last
okR.bits.user :<= listBuffer.ioDataOut.bits.payload.user
// Upon the final beat in a write request, record a mapping from TileLink source ID to AXI write ID. Upon a write
// response, mark the write transaction as complete.
val writeIdMap = Mem(numTlTxns, UInt(log2Ceil(numIds).W))
val writeResponseId = writeIdMap.read(strippedResponseSourceId)
when(wOut.fire) {
writeIdMap.write(freeWriteIdIndex, in.aw.bits.id)
}
when(edgeOut.done(wOut)) {
usedWriteIdsSet := freeWriteIdOH
}
when(okB.fire) {
usedWriteIdsClr := UIntToOH(strippedResponseSourceId, numTlTxns)
}
okB.bits.id := writeResponseId
okB.bits.resp := dResp
okB.bits.user :<= out.d.bits.user
// AXI4 needs irrevocable behaviour
in.r <> Queue.irrevocable(okR, 1, flow = true)
in.b <> Queue.irrevocable(okB, 1, flow = true)
// Unused channels
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
/* Alignment constraints. The AXI4Fragmenter should guarantee all of these constraints. */
def checkRequest[T <: AXI4BundleA](a: IrrevocableIO[T], reqType: String): Unit = {
val lReqType = reqType.toLowerCase
when(a.valid) {
assert(a.bits.len < maxBeats.U, s"$reqType burst length (%d) must be less than $maxBeats", a.bits.len + 1.U)
// Narrow transfers and FIXED bursts must be single-beat bursts.
when(a.bits.len =/= 0.U) {
assert(
a.bits.size === log2Ceil(beatBytes).U,
s"Narrow $lReqType transfers (%d < $beatBytes bytes) can't be multi-beat bursts (%d beats)",
1.U << a.bits.size,
a.bits.len + 1.U
)
assert(
a.bits.burst =/= AXI4Parameters.BURST_FIXED,
s"Fixed $lReqType bursts can't be multi-beat bursts (%d beats)",
a.bits.len + 1.U
)
}
// Furthermore, the transfer size (a.bits.bytes1() + 1.U) must be naturally-aligned to the address (in
// particular, during both WRAP and INCR bursts), but this constraint is already checked by TileLink
// Monitors. Note that this alignment requirement means that WRAP bursts are identical to INCR bursts.
}
}
checkRequest(in.ar, "Read")
checkRequest(in.aw, "Write")
}
}
}
object UnsafeAXI4ToTL {
def apply(numTlTxns: Int = 1, wcorrupt: Boolean = true)(implicit p: Parameters) = {
val axi42tl = LazyModule(new UnsafeAXI4ToTL(numTlTxns, wcorrupt))
axi42tl.node
}
}
/* ReservableListBuffer logic, and associated classes. */
class ResponsePayload[T <: Data](val data: T, val params: ReservableListBufferParameters) extends Bundle {
val index = UInt(params.entryBits.W)
val count = UInt(params.beatBits.W)
val numBeats1 = UInt(params.beatBits.W)
}
class DataOutPayload[T <: Data](val payload: T, val params: ReservableListBufferParameters) extends Bundle {
val listIndex = UInt(params.listBits.W)
}
/** Abstract base class to unify [[ReservableListBuffer]] and [[PassthroughListBuffer]]. */
abstract class BaseReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends Module {
require(params.numEntries > 0)
require(params.numLists > 0)
val ioReserve = IO(Flipped(Decoupled(UInt(params.listBits.W))))
val ioReservedIndex = IO(Output(UInt(params.entryBits.W)))
val ioResponse = IO(Flipped(Decoupled(new ResponsePayload(gen, params))))
val ioDataOut = IO(Decoupled(new DataOutPayload(gen, params)))
}
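// Illustrative usage sketch (an assumption, not part of the original source) for the list
// buffers defined below: a parent reserves an entry per AXI read ID before issuing the
// TileLink Get, pushes possibly out-of-order beats through 'ioResponse', and drains them in
// per-list order from 'ioDataOut'. The names 'arId', 'issuingRead', 'respValid', 'respSource',
// 'beat' and 'sink' are hypothetical.
//
//   val buf = Module(new ReservableListBuffer(gen, params))
//   buf.ioReserve.bits        := arId        // linked list keyed by AXI transaction ID
//   buf.ioReserve.valid       := issuingRead // buf.ioReservedIndex then tags the TL source
//   buf.ioResponse.valid      := respValid
//   buf.ioResponse.bits.index := respSource  // entry reserved for this response
//   buf.ioResponse.bits.data  := beat        // payload of type 'gen'
//   buf.ioDataOut.ready       := sink.ready  // beats leave in order within each list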
/** A modified version of 'ListBuffer' from 'sifive/block-inclusivecache-sifive'. This module forces users to reserve
* linked list entries (through the 'ioReserve' port) before writing data into those linked lists (through the
* 'ioResponse' port). Each response is tagged to indicate which linked list it is written into. The responses for a
* given linked list can come back out-of-order, but they will be read out through the 'ioDataOut' port in-order.
*
* ==Constructor==
* @param gen Chisel type of linked list data element
* @param params Other parameters
*
* ==Module IO==
* @param ioReserve Index of list to reserve a new element in
* @param ioReservedIndex Index of the entry that was reserved in the linked list, valid when 'ioReserve.fire'
* @param ioResponse Payload containing response data and linked-list-entry index
* @param ioDataOut Payload containing data read from response linked list and linked list index
*/
class ReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
val valid = RegInit(0.U(params.numLists.W))
val head = Mem(params.numLists, UInt(params.entryBits.W))
val tail = Mem(params.numLists, UInt(params.entryBits.W))
val used = RegInit(0.U(params.numEntries.W))
val next = Mem(params.numEntries, UInt(params.entryBits.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val dataMems = Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) }
val dataIsPresent = RegInit(0.U(params.numEntries.W))
val beats = Mem(params.numEntries, UInt(params.beatBits.W))
// The 'data' SRAM should be single-ported (read-or-write), since dual-ported SRAMs are significantly slower.
val dataMemReadEnable = WireDefault(false.B)
val dataMemWriteEnable = WireDefault(false.B)
assert(!(dataMemReadEnable && dataMemWriteEnable))
// 'freeOH' has a single bit set, which is the least-significant bit that is cleared in 'used'. So, it's the
// lowest-index entry in the 'data' RAM which is free.
val freeOH = Wire(UInt(params.numEntries.W))
val freeIndex = OHToUInt(freeOH)
freeOH := ~(leftOR(~used) << 1) & ~used
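// Illustrative worked example (assuming the rocket-chip 'leftOR' semantics, where result bit i is
// the OR of input bits 0..i): with used = b0101, ~used = b1010 and leftOR(~used) = b1110, so
// ~(leftOR(~used) << 1) = b0011 (keeping 4 bits) and freeOH = b0011 & b1010 = b0010, i.e. entry 1,
// the lowest-index free entry, is selected.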
ioReservedIndex := freeIndex
val validSet = WireDefault(0.U(params.numLists.W))
val validClr = WireDefault(0.U(params.numLists.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
val dataIsPresentSet = WireDefault(0.U(params.numEntries.W))
val dataIsPresentClr = WireDefault(0.U(params.numEntries.W))
valid := (valid & ~validClr) | validSet
used := (used & ~usedClr) | usedSet
dataIsPresent := (dataIsPresent & ~dataIsPresentClr) | dataIsPresentSet
/* Reservation logic signals */
val reserveTail = Wire(UInt(params.entryBits.W))
val reserveIsValid = Wire(Bool())
/* Response logic signals */
val responseIndex = Wire(UInt(params.entryBits.W))
val responseListIndex = Wire(UInt(params.listBits.W))
val responseHead = Wire(UInt(params.entryBits.W))
val responseTail = Wire(UInt(params.entryBits.W))
val nextResponseHead = Wire(UInt(params.entryBits.W))
val nextDataIsPresent = Wire(Bool())
val isResponseInOrder = Wire(Bool())
val isEndOfList = Wire(Bool())
val isLastBeat = Wire(Bool())
val isLastResponseBeat = Wire(Bool())
val isLastUnwindBeat = Wire(Bool())
/* Reservation logic */
reserveTail := tail.read(ioReserve.bits)
reserveIsValid := valid(ioReserve.bits)
ioReserve.ready := !used.andR
// When we want to append-to and destroy the same linked list on the same cycle, we need to take special care that we
// actually start a new list, rather than appending to a list that's about to disappear.
val reserveResponseSameList = ioReserve.bits === responseListIndex
val appendToAndDestroyList =
ioReserve.fire && ioDataOut.fire && reserveResponseSameList && isEndOfList && isLastBeat
when(ioReserve.fire) {
validSet := UIntToOH(ioReserve.bits, params.numLists)
usedSet := freeOH
when(reserveIsValid && !appendToAndDestroyList) {
next.write(reserveTail, freeIndex)
}.otherwise {
head.write(ioReserve.bits, freeIndex)
}
tail.write(ioReserve.bits, freeIndex)
map.write(freeIndex, ioReserve.bits)
}
/* Response logic */
// The majority of the response logic (reading from and writing to the various RAMs) is common between the
// response-from-IO case (ioResponse.fire) and the response-from-unwind case (unwindDataIsValid).
// The read from the 'next' RAM should be performed at the address given by 'responseHead'. However, we only use the
// 'nextResponseHead' signal when 'isResponseInOrder' is asserted (both in the response-from-IO and
// response-from-unwind cases), which implies that 'responseHead' equals 'responseIndex'. 'responseHead' comes after
// two back-to-back RAM reads, so indexing into the 'next' RAM with 'responseIndex' is much quicker.
responseHead := head.read(responseListIndex)
responseTail := tail.read(responseListIndex)
nextResponseHead := next.read(responseIndex)
nextDataIsPresent := dataIsPresent(nextResponseHead)
// Note that when 'isEndOfList' is asserted, 'nextResponseHead' (and therefore 'nextDataIsPresent') is invalid, since
// there isn't a next element in the linked list.
isResponseInOrder := responseHead === responseIndex
isEndOfList := responseHead === responseTail
isLastResponseBeat := ioResponse.bits.count === ioResponse.bits.numBeats1
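// 'numBeats1' follows the usual "minus one" convention (number of beats in the burst minus 1, as
// also noted in the unwind logic below), so the final beat is the one whose running 'count' equals
// 'numBeats1'.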
// When a response's last beat is sent to the output channel, mark it as completed. This can happen in two
// situations:
// 1. We receive an in-order response, which travels straight from 'ioResponse' to 'ioDataOut'. The 'data' SRAM
// reservation was never needed.
// 2. An entry is read out of the 'data' SRAM (within the unwind FSM).
when(ioDataOut.fire && isLastBeat) {
// Mark the reservation as no-longer-used.
usedClr := UIntToOH(responseIndex, params.numEntries)
// If the response is in-order, then we're popping an element from this linked list.
when(isEndOfList) {
// Once we pop the last element from a linked list, mark it as no-longer-present.
validClr := UIntToOH(responseListIndex, params.numLists)
}.otherwise {
// Move the linked list's head pointer to the new head pointer.
head.write(responseListIndex, nextResponseHead)
}
}
// If we get an out-of-order response, then stash it in the 'data' SRAM for later unwinding.
when(ioResponse.fire && !isResponseInOrder) {
dataMemWriteEnable := true.B
when(isLastResponseBeat) {
dataIsPresentSet := UIntToOH(ioResponse.bits.index, params.numEntries)
beats.write(ioResponse.bits.index, ioResponse.bits.numBeats1)
}
}
// Use the 'ioResponse.bits.count' index (AKA the beat number) to select which 'data' SRAM to write to.
val responseCountOH = UIntToOH(ioResponse.bits.count, params.numBeats)
(responseCountOH.asBools zip dataMems) foreach { case (select, seqMem) =>
when(select && dataMemWriteEnable) {
seqMem.write(ioResponse.bits.index, ioResponse.bits.data)
}
}
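// Illustrative example: with params.numBeats = 4 and ioResponse.bits.count = 2, responseCountOH is
// b0100, so only dataMems(2) sees a write this cycle; the other three SyncReadMems are untouched.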
/* Response unwind logic */
// Unwind FSM state definitions
val sIdle :: sUnwinding :: Nil = Enum(2)
val unwindState = RegInit(sIdle)
val busyUnwinding = unwindState === sUnwinding
val startUnwind = Wire(Bool())
val stopUnwind = Wire(Bool())
when(startUnwind) {
unwindState := sUnwinding
}.elsewhen(stopUnwind) {
unwindState := sIdle
}
assert(!(startUnwind && stopUnwind))
// Start the unwind FSM when there is an old out-of-order response stored in the 'data' SRAM that is now about to
// become the next in-order response. As noted previously, when 'isEndOfList' is asserted, 'nextDataIsPresent' is
// invalid.
//
// Note that since an in-order response from 'ioResponse' to 'ioDataOut' starts the unwind FSM, we don't have to
// worry about overwriting the 'data' SRAM's output when we start the unwind FSM.
startUnwind := ioResponse.fire && isResponseInOrder && isLastResponseBeat && !isEndOfList && nextDataIsPresent
// Stop the unwind FSM when the output channel consumes the final beat of an element from the unwind FSM, and one of
// two things happens:
// 1. We're still waiting for the next in-order response for this list (!nextDataIsPresent)
// 2. There are no more outstanding responses in this list (isEndOfList)
//
// Including 'busyUnwinding' ensures this is a single-cycle pulse, and it never fires while in-order transactions are
// passing from 'ioResponse' to 'ioDataOut'.
stopUnwind := busyUnwinding && ioDataOut.fire && isLastUnwindBeat && (!nextDataIsPresent || isEndOfList)
val isUnwindBurstOver = Wire(Bool())
val startNewBurst = startUnwind || (isUnwindBurstOver && dataMemReadEnable)
// Track the number of beats left to unwind for each list entry. At the start of a new burst, we flop the number of
// beats in this burst (minus 1) into 'unwindBeats1', and we reset the 'beatCounter' counter. With each beat, we
// increment 'beatCounter' until it reaches 'unwindBeats1'.
val unwindBeats1 = Reg(UInt(params.beatBits.W))
val nextBeatCounter = Wire(UInt(params.beatBits.W))
val beatCounter = RegNext(nextBeatCounter)
isUnwindBurstOver := beatCounter === unwindBeats1
when(startNewBurst) {
unwindBeats1 := beats.read(nextResponseHead)
nextBeatCounter := 0.U
}.elsewhen(dataMemReadEnable) {
nextBeatCounter := beatCounter + 1.U
}.otherwise {
nextBeatCounter := beatCounter
}
// When unwinding, feed the next linked-list head pointer (read out of the 'next' RAM) back so we can unwind the next
// entry in this linked list. Only update the pointer when we're actually moving to the next 'data' SRAM entry (which
// happens at the start of reading a new stored burst).
val unwindResponseIndex = RegEnable(nextResponseHead, startNewBurst)
responseIndex := Mux(busyUnwinding, unwindResponseIndex, ioResponse.bits.index)
// Hold 'nextResponseHead' static while we're in the middle of unwinding a multi-beat burst entry. We don't want the
// SRAM read address to shift while reading beats from a burst. Note that this is identical to 'nextResponseHead
// holdUnless startNewBurst', but 'unwindResponseIndex' already implements the 'RegEnable' signal in 'holdUnless'.
val unwindReadAddress = Mux(startNewBurst, nextResponseHead, unwindResponseIndex)
// The 'data' SRAM's output is valid if we read from the SRAM on the previous cycle. The SRAM's output stays valid
// until it is consumed by the output channel (and if we don't read from the SRAM again on that same cycle).
val unwindDataIsValid = RegInit(false.B)
when(dataMemReadEnable) {
unwindDataIsValid := true.B
}.elsewhen(ioDataOut.fire) {
unwindDataIsValid := false.B
}
isLastUnwindBeat := isUnwindBurstOver && unwindDataIsValid
// Indicates if this is the last beat for both 'ioResponse'-to-'ioDataOut' and unwind-to-'ioDataOut' beats.
isLastBeat := Mux(busyUnwinding, isLastUnwindBeat, isLastResponseBeat)
// Select which SRAM to read from based on the beat counter.
val dataOutputVec = Wire(Vec(params.numBeats, gen))
val nextBeatCounterOH = UIntToOH(nextBeatCounter, params.numBeats)
(nextBeatCounterOH.asBools zip dataMems).zipWithIndex foreach { case ((select, seqMem), i) =>
dataOutputVec(i) := seqMem.read(unwindReadAddress, select && dataMemReadEnable)
}
// Select the current 'data' SRAM output beat, and save the output in a register in case we're being back-pressured
// by 'ioDataOut'. This implements the functionality of 'readAndHold', but only on the single SRAM we're reading
// from.
val dataOutput = dataOutputVec(beatCounter) holdUnless RegNext(dataMemReadEnable)
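// For reference, 'x holdUnless en' behaves roughly like Mux(en, x, RegEnable(x, en)): the value
// passes through while 'en' is high and is otherwise held from the last enabled cycle. Here that
// keeps the selected SRAM beat stable while 'ioDataOut' is back-pressured.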
// Mark 'data' burst entries as no-longer-present as they get read out of the SRAM.
when(dataMemReadEnable) {
dataIsPresentClr := UIntToOH(unwindReadAddress, params.numEntries)
}
// As noted above, when starting the unwind FSM, we know the 'data' SRAM's output isn't valid, so it's safe to issue
// a read command. Otherwise, only issue an SRAM read when the next 'unwindState' is 'sUnwinding', and if we know
// we're not going to overwrite the SRAM's current output (the SRAM output is already valid, and it's not going to be
// consumed by the output channel).
val dontReadFromDataMem = unwindDataIsValid && !ioDataOut.ready
dataMemReadEnable := startUnwind || (busyUnwinding && !stopUnwind && !dontReadFromDataMem)
// While unwinding, prevent new reservations from overwriting the current 'map' entry that we're using. We need
// 'responseListIndex' to be coherent for the entire unwind process.
val rawResponseListIndex = map.read(responseIndex)
val unwindResponseListIndex = RegEnable(rawResponseListIndex, startNewBurst)
responseListIndex := Mux(busyUnwinding, unwindResponseListIndex, rawResponseListIndex)
// Accept responses either when they can be passed through to the output channel, or if they're out-of-order and are
// just going to be stashed in the 'data' SRAM. Never accept a response payload when we're busy unwinding, since that
// could result in reading from and writing to the 'data' SRAM in the same cycle, and we want that SRAM to be
// single-ported.
ioResponse.ready := (ioDataOut.ready || !isResponseInOrder) && !busyUnwinding
// Either pass an in-order response to the output channel, or data read from the unwind FSM.
ioDataOut.valid := Mux(busyUnwinding, unwindDataIsValid, ioResponse.valid && isResponseInOrder)
ioDataOut.bits.listIndex := responseListIndex
ioDataOut.bits.payload := Mux(busyUnwinding, dataOutput, ioResponse.bits.data)
// It's an error to get a response that isn't associated with a valid linked list.
when(ioResponse.fire || unwindDataIsValid) {
assert(
valid(responseListIndex),
"No linked list exists at index %d, mapped from %d",
responseListIndex,
responseIndex
)
}
when(busyUnwinding && dataMemReadEnable) {
assert(isResponseInOrder, "Unwind FSM must read entries from SRAM in order")
}
}
/** Specialized version of [[ReservableListBuffer]] for the case of numEntries == 1.
*
* Much of the complex logic in [[ReservableListBuffer]] can disappear in this case. For instance, we don't have to
* reorder any responses, or store any linked lists.
*/
class PassthroughListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
require(params.numEntries == 1, s"PassthroughListBuffer is only valid when 'numEntries' (${params.numEntries}) is 1")
val used = RegInit(0.U(params.numEntries.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
used := (used & ~usedClr) | usedSet
ioReserve.ready := used === 0.U
// Store which list index was reserved, we need to return this value when we get a response.
when(ioReserve.fire) {
usedSet := 1.U
map.write(0.U, ioReserve.bits)
}
// There's only one valid linked list entry, which is at index 0.
ioReservedIndex := 0.U
val isLastResponseBeat = ioResponse.bits.count === ioResponse.bits.numBeats1
// Mark the linked list as empty when we get the last beat in a response.
// Note that 'ioResponse.fire === ioDataOut.fire'.
when(ioResponse.fire && isLastResponseBeat) {
usedClr := 1.U
}
// Always pass the response data straight through, since we never need to reorder the response data.
ioDataOut.bits.listIndex := map.read(0.U)
ioDataOut.bits.payload := ioResponse.bits.data
ioDataOut.valid := ioResponse.valid
ioResponse.ready := ioDataOut.ready
}
| module dataMems_137( // @[UnsafeAXI4ToTL.scala:365:62]
input [4:0] R0_addr,
input R0_en,
input R0_clk,
output [66:0] R0_data,
input [4:0] W0_addr,
input W0_en,
input W0_clk,
input [66:0] W0_data
);
dataMems_0_ext dataMems_0_ext ( // @[UnsafeAXI4ToTL.scala:365:62]
.R0_addr (R0_addr),
.R0_en (R0_en),
.R0_clk (R0_clk),
.R0_data (R0_data),
.W0_addr (W0_addr),
.W0_en (W0_en),
.W0_clk (W0_clk),
.W0_data (W0_data)
); // @[UnsafeAXI4ToTL.scala:365:62]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
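// Per the Arithmetic typeclass (defined in Arithmetic.scala), 'mac' computes (m1 * m2 + self), so
// this module simply produces out_d = in_a * in_b + in_c in whatever number format the three types
// describe.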
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
* @param width Data width of operands
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
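// A sketch of the intent, read directly from the code: 'last_s' remembers the propagate flag from
// the previous valid cycle, so 'flip' is high on a valid cycle where that flag has just toggled
// (i.e. the c1/c2 roles have swapped), and only then is the rounding shift applied to the value
// being driven out.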
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds away from 0
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
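// Illustrative example: self = 11 (b1011), u = 2. point_five = bit 1 = 1, zeros = (bit 0 != 0) = 1,
// ones_digit = bit 2 = 0, so r = 1 and the result is (11 >> 2) + 1 = 3, i.e. 11/4 = 2.75 rounded to
// the nearest integer.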
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
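// Illustrative example: clipping to a 20-bit target gives maxsat = 2^19 - 1 = 524287 and
// minsat = -2^19 = -524288; anything outside that range saturates before the low 20 bits are taken.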
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
// Instantiate the hardfloat sqrt
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
// Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
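// Illustrative example: with u = 1, shift_exp = bias - 1, so shift_fn encodes 2^(-1) = 0.5
// (sign 0, biased exponent bias - 1, zero mantissa) and the multiply below halves 'self'.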
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module PE_370( // @[PE.scala:31:7]
input clock, // @[PE.scala:31:7]
input reset, // @[PE.scala:31:7]
input [7:0] io_in_a, // @[PE.scala:35:14]
input [19:0] io_in_b, // @[PE.scala:35:14]
input [19:0] io_in_d, // @[PE.scala:35:14]
output [7:0] io_out_a, // @[PE.scala:35:14]
output [19:0] io_out_b, // @[PE.scala:35:14]
output [19:0] io_out_c, // @[PE.scala:35:14]
input io_in_control_dataflow, // @[PE.scala:35:14]
input io_in_control_propagate, // @[PE.scala:35:14]
input [4:0] io_in_control_shift, // @[PE.scala:35:14]
output io_out_control_dataflow, // @[PE.scala:35:14]
output io_out_control_propagate, // @[PE.scala:35:14]
output [4:0] io_out_control_shift, // @[PE.scala:35:14]
input [2:0] io_in_id, // @[PE.scala:35:14]
output [2:0] io_out_id, // @[PE.scala:35:14]
input io_in_last, // @[PE.scala:35:14]
output io_out_last, // @[PE.scala:35:14]
input io_in_valid, // @[PE.scala:35:14]
output io_out_valid, // @[PE.scala:35:14]
output io_bad_dataflow // @[PE.scala:35:14]
);
wire [19:0] _mac_unit_io_out_d; // @[PE.scala:64:24]
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:31:7]
wire [19:0] io_in_b_0 = io_in_b; // @[PE.scala:31:7]
wire [19:0] io_in_d_0 = io_in_d; // @[PE.scala:31:7]
wire io_in_control_dataflow_0 = io_in_control_dataflow; // @[PE.scala:31:7]
wire io_in_control_propagate_0 = io_in_control_propagate; // @[PE.scala:31:7]
wire [4:0] io_in_control_shift_0 = io_in_control_shift; // @[PE.scala:31:7]
wire [2:0] io_in_id_0 = io_in_id; // @[PE.scala:31:7]
wire io_in_last_0 = io_in_last; // @[PE.scala:31:7]
wire io_in_valid_0 = io_in_valid; // @[PE.scala:31:7]
wire io_bad_dataflow_0 = 1'h0; // @[PE.scala:31:7]
wire [7:0] io_out_a_0 = io_in_a_0; // @[PE.scala:31:7]
wire [19:0] _mac_unit_io_in_b_T = io_in_b_0; // @[PE.scala:31:7, :106:37]
wire [19:0] _mac_unit_io_in_b_T_2 = io_in_b_0; // @[PE.scala:31:7, :113:37]
wire [19:0] _mac_unit_io_in_b_T_8 = io_in_b_0; // @[PE.scala:31:7, :137:35]
wire [19:0] c1_lo_1 = io_in_d_0; // @[PE.scala:31:7]
wire [19:0] c2_lo_1 = io_in_d_0; // @[PE.scala:31:7]
wire io_out_control_dataflow_0 = io_in_control_dataflow_0; // @[PE.scala:31:7]
wire io_out_control_propagate_0 = io_in_control_propagate_0; // @[PE.scala:31:7]
wire [4:0] io_out_control_shift_0 = io_in_control_shift_0; // @[PE.scala:31:7]
wire [2:0] io_out_id_0 = io_in_id_0; // @[PE.scala:31:7]
wire io_out_last_0 = io_in_last_0; // @[PE.scala:31:7]
wire io_out_valid_0 = io_in_valid_0; // @[PE.scala:31:7]
wire [19:0] io_out_b_0; // @[PE.scala:31:7]
wire [19:0] io_out_c_0; // @[PE.scala:31:7]
reg [31:0] c1; // @[PE.scala:70:15]
wire [31:0] _io_out_c_zeros_T_1 = c1; // @[PE.scala:70:15]
wire [31:0] _mac_unit_io_in_b_T_6 = c1; // @[PE.scala:70:15, :127:38]
reg [31:0] c2; // @[PE.scala:71:15]
wire [31:0] _io_out_c_zeros_T_10 = c2; // @[PE.scala:71:15]
wire [31:0] _mac_unit_io_in_b_T_4 = c2; // @[PE.scala:71:15, :121:38]
reg last_s; // @[PE.scala:89:25]
wire flip = last_s != io_in_control_propagate_0; // @[PE.scala:31:7, :89:25, :90:21]
wire [4:0] shift_offset = flip ? io_in_control_shift_0 : 5'h0; // @[PE.scala:31:7, :90:21, :91:25]
wire _GEN = shift_offset == 5'h0; // @[PE.scala:91:25]
wire _io_out_c_point_five_T; // @[Arithmetic.scala:101:32]
assign _io_out_c_point_five_T = _GEN; // @[Arithmetic.scala:101:32]
wire _io_out_c_point_five_T_5; // @[Arithmetic.scala:101:32]
assign _io_out_c_point_five_T_5 = _GEN; // @[Arithmetic.scala:101:32]
wire [5:0] _GEN_0 = {1'h0, shift_offset} - 6'h1; // @[PE.scala:91:25]
wire [5:0] _io_out_c_point_five_T_1; // @[Arithmetic.scala:101:53]
assign _io_out_c_point_five_T_1 = _GEN_0; // @[Arithmetic.scala:101:53]
wire [5:0] _io_out_c_zeros_T_2; // @[Arithmetic.scala:102:66]
assign _io_out_c_zeros_T_2 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66]
wire [5:0] _io_out_c_point_five_T_6; // @[Arithmetic.scala:101:53]
assign _io_out_c_point_five_T_6 = _GEN_0; // @[Arithmetic.scala:101:53]
wire [5:0] _io_out_c_zeros_T_11; // @[Arithmetic.scala:102:66]
assign _io_out_c_zeros_T_11 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66]
wire [4:0] _io_out_c_point_five_T_2 = _io_out_c_point_five_T_1[4:0]; // @[Arithmetic.scala:101:53]
wire [31:0] _io_out_c_point_five_T_3 = $signed($signed(c1) >>> _io_out_c_point_five_T_2); // @[PE.scala:70:15]
wire _io_out_c_point_five_T_4 = _io_out_c_point_five_T_3[0]; // @[Arithmetic.scala:101:50]
wire io_out_c_point_five = ~_io_out_c_point_five_T & _io_out_c_point_five_T_4; // @[Arithmetic.scala:101:{29,32,50}]
wire _GEN_1 = shift_offset < 5'h2; // @[PE.scala:91:25]
wire _io_out_c_zeros_T; // @[Arithmetic.scala:102:27]
assign _io_out_c_zeros_T = _GEN_1; // @[Arithmetic.scala:102:27]
wire _io_out_c_zeros_T_9; // @[Arithmetic.scala:102:27]
assign _io_out_c_zeros_T_9 = _GEN_1; // @[Arithmetic.scala:102:27]
wire [4:0] _io_out_c_zeros_T_3 = _io_out_c_zeros_T_2[4:0]; // @[Arithmetic.scala:102:66]
wire [31:0] _io_out_c_zeros_T_4 = 32'h1 << _io_out_c_zeros_T_3; // @[Arithmetic.scala:102:{60,66}]
wire [32:0] _io_out_c_zeros_T_5 = {1'h0, _io_out_c_zeros_T_4} - 33'h1; // @[Arithmetic.scala:102:{60,81}]
wire [31:0] _io_out_c_zeros_T_6 = _io_out_c_zeros_T_5[31:0]; // @[Arithmetic.scala:102:81]
wire [31:0] _io_out_c_zeros_T_7 = _io_out_c_zeros_T_1 & _io_out_c_zeros_T_6; // @[Arithmetic.scala:102:{45,52,81}]
wire [31:0] _io_out_c_zeros_T_8 = _io_out_c_zeros_T ? 32'h0 : _io_out_c_zeros_T_7; // @[Arithmetic.scala:102:{24,27,52}]
wire io_out_c_zeros = |_io_out_c_zeros_T_8; // @[Arithmetic.scala:102:{24,89}]
wire [31:0] _GEN_2 = {27'h0, shift_offset}; // @[PE.scala:91:25]
wire [31:0] _GEN_3 = $signed($signed(c1) >>> _GEN_2); // @[PE.scala:70:15]
wire [31:0] _io_out_c_ones_digit_T; // @[Arithmetic.scala:103:30]
assign _io_out_c_ones_digit_T = _GEN_3; // @[Arithmetic.scala:103:30]
wire [31:0] _io_out_c_T; // @[Arithmetic.scala:107:15]
assign _io_out_c_T = _GEN_3; // @[Arithmetic.scala:103:30, :107:15]
wire io_out_c_ones_digit = _io_out_c_ones_digit_T[0]; // @[Arithmetic.scala:103:30]
wire _io_out_c_r_T = io_out_c_zeros | io_out_c_ones_digit; // @[Arithmetic.scala:102:89, :103:30, :105:38]
wire _io_out_c_r_T_1 = io_out_c_point_five & _io_out_c_r_T; // @[Arithmetic.scala:101:29, :105:{29,38}]
wire io_out_c_r = _io_out_c_r_T_1; // @[Arithmetic.scala:105:{29,53}]
wire [1:0] _io_out_c_T_1 = {1'h0, io_out_c_r}; // @[Arithmetic.scala:105:53, :107:33]
wire [32:0] _io_out_c_T_2 = {_io_out_c_T[31], _io_out_c_T} + {{31{_io_out_c_T_1[1]}}, _io_out_c_T_1}; // @[Arithmetic.scala:107:{15,28,33}]
wire [31:0] _io_out_c_T_3 = _io_out_c_T_2[31:0]; // @[Arithmetic.scala:107:28]
wire [31:0] _io_out_c_T_4 = _io_out_c_T_3; // @[Arithmetic.scala:107:28]
wire _io_out_c_T_5 = $signed(_io_out_c_T_4) > 32'sh7FFFF; // @[Arithmetic.scala:107:28, :125:33]
wire _io_out_c_T_6 = $signed(_io_out_c_T_4) < -32'sh80000; // @[Arithmetic.scala:107:28, :125:60]
wire [31:0] _io_out_c_T_7 = _io_out_c_T_6 ? 32'hFFF80000 : _io_out_c_T_4; // @[Mux.scala:126:16]
wire [31:0] _io_out_c_T_8 = _io_out_c_T_5 ? 32'h7FFFF : _io_out_c_T_7; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_9 = _io_out_c_T_8[19:0]; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_10 = _io_out_c_T_9; // @[Arithmetic.scala:125:{81,99}]
wire [19:0] _mac_unit_io_in_b_T_1 = _mac_unit_io_in_b_T; // @[PE.scala:106:37]
wire [7:0] _mac_unit_io_in_b_WIRE = _mac_unit_io_in_b_T_1[7:0]; // @[PE.scala:106:37]
wire c1_sign = io_in_d_0[19]; // @[PE.scala:31:7]
wire c2_sign = io_in_d_0[19]; // @[PE.scala:31:7]
wire [1:0] _GEN_4 = {2{c1_sign}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] c1_lo_lo_hi; // @[Arithmetic.scala:118:18]
assign c1_lo_lo_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [1:0] c1_lo_hi_hi; // @[Arithmetic.scala:118:18]
assign c1_lo_hi_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [1:0] c1_hi_lo_hi; // @[Arithmetic.scala:118:18]
assign c1_hi_lo_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [1:0] c1_hi_hi_hi; // @[Arithmetic.scala:118:18]
assign c1_hi_hi_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [2:0] c1_lo_lo = {c1_lo_lo_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c1_lo_hi = {c1_lo_hi_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c1_lo = {c1_lo_hi, c1_lo_lo}; // @[Arithmetic.scala:118:18]
wire [2:0] c1_hi_lo = {c1_hi_lo_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c1_hi_hi = {c1_hi_hi_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c1_hi = {c1_hi_hi, c1_hi_lo}; // @[Arithmetic.scala:118:18]
wire [11:0] _c1_T = {c1_hi, c1_lo}; // @[Arithmetic.scala:118:18]
wire [31:0] _c1_T_1 = {_c1_T, c1_lo_1}; // @[Arithmetic.scala:118:{14,18}]
wire [31:0] _c1_T_2 = _c1_T_1; // @[Arithmetic.scala:118:{14,61}]
wire [31:0] _c1_WIRE = _c1_T_2; // @[Arithmetic.scala:118:61]
wire [4:0] _io_out_c_point_five_T_7 = _io_out_c_point_five_T_6[4:0]; // @[Arithmetic.scala:101:53]
wire [31:0] _io_out_c_point_five_T_8 = $signed($signed(c2) >>> _io_out_c_point_five_T_7); // @[PE.scala:71:15]
wire _io_out_c_point_five_T_9 = _io_out_c_point_five_T_8[0]; // @[Arithmetic.scala:101:50]
wire io_out_c_point_five_1 = ~_io_out_c_point_five_T_5 & _io_out_c_point_five_T_9; // @[Arithmetic.scala:101:{29,32,50}]
wire [4:0] _io_out_c_zeros_T_12 = _io_out_c_zeros_T_11[4:0]; // @[Arithmetic.scala:102:66]
wire [31:0] _io_out_c_zeros_T_13 = 32'h1 << _io_out_c_zeros_T_12; // @[Arithmetic.scala:102:{60,66}]
wire [32:0] _io_out_c_zeros_T_14 = {1'h0, _io_out_c_zeros_T_13} - 33'h1; // @[Arithmetic.scala:102:{60,81}]
wire [31:0] _io_out_c_zeros_T_15 = _io_out_c_zeros_T_14[31:0]; // @[Arithmetic.scala:102:81]
wire [31:0] _io_out_c_zeros_T_16 = _io_out_c_zeros_T_10 & _io_out_c_zeros_T_15; // @[Arithmetic.scala:102:{45,52,81}]
wire [31:0] _io_out_c_zeros_T_17 = _io_out_c_zeros_T_9 ? 32'h0 : _io_out_c_zeros_T_16; // @[Arithmetic.scala:102:{24,27,52}]
wire io_out_c_zeros_1 = |_io_out_c_zeros_T_17; // @[Arithmetic.scala:102:{24,89}]
wire [31:0] _GEN_5 = $signed($signed(c2) >>> _GEN_2); // @[PE.scala:71:15]
wire [31:0] _io_out_c_ones_digit_T_1; // @[Arithmetic.scala:103:30]
assign _io_out_c_ones_digit_T_1 = _GEN_5; // @[Arithmetic.scala:103:30]
wire [31:0] _io_out_c_T_11; // @[Arithmetic.scala:107:15]
assign _io_out_c_T_11 = _GEN_5; // @[Arithmetic.scala:103:30, :107:15]
wire io_out_c_ones_digit_1 = _io_out_c_ones_digit_T_1[0]; // @[Arithmetic.scala:103:30]
wire _io_out_c_r_T_2 = io_out_c_zeros_1 | io_out_c_ones_digit_1; // @[Arithmetic.scala:102:89, :103:30, :105:38]
wire _io_out_c_r_T_3 = io_out_c_point_five_1 & _io_out_c_r_T_2; // @[Arithmetic.scala:101:29, :105:{29,38}]
wire io_out_c_r_1 = _io_out_c_r_T_3; // @[Arithmetic.scala:105:{29,53}]
wire [1:0] _io_out_c_T_12 = {1'h0, io_out_c_r_1}; // @[Arithmetic.scala:105:53, :107:33]
wire [32:0] _io_out_c_T_13 = {_io_out_c_T_11[31], _io_out_c_T_11} + {{31{_io_out_c_T_12[1]}}, _io_out_c_T_12}; // @[Arithmetic.scala:107:{15,28,33}]
wire [31:0] _io_out_c_T_14 = _io_out_c_T_13[31:0]; // @[Arithmetic.scala:107:28]
wire [31:0] _io_out_c_T_15 = _io_out_c_T_14; // @[Arithmetic.scala:107:28]
wire _io_out_c_T_16 = $signed(_io_out_c_T_15) > 32'sh7FFFF; // @[Arithmetic.scala:107:28, :125:33]
wire _io_out_c_T_17 = $signed(_io_out_c_T_15) < -32'sh80000; // @[Arithmetic.scala:107:28, :125:60]
wire [31:0] _io_out_c_T_18 = _io_out_c_T_17 ? 32'hFFF80000 : _io_out_c_T_15; // @[Mux.scala:126:16]
wire [31:0] _io_out_c_T_19 = _io_out_c_T_16 ? 32'h7FFFF : _io_out_c_T_18; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_20 = _io_out_c_T_19[19:0]; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_21 = _io_out_c_T_20; // @[Arithmetic.scala:125:{81,99}]
wire [19:0] _mac_unit_io_in_b_T_3 = _mac_unit_io_in_b_T_2; // @[PE.scala:113:37]
wire [7:0] _mac_unit_io_in_b_WIRE_1 = _mac_unit_io_in_b_T_3[7:0]; // @[PE.scala:113:37]
wire [1:0] _GEN_6 = {2{c2_sign}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] c2_lo_lo_hi; // @[Arithmetic.scala:118:18]
assign c2_lo_lo_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [1:0] c2_lo_hi_hi; // @[Arithmetic.scala:118:18]
assign c2_lo_hi_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [1:0] c2_hi_lo_hi; // @[Arithmetic.scala:118:18]
assign c2_hi_lo_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [1:0] c2_hi_hi_hi; // @[Arithmetic.scala:118:18]
assign c2_hi_hi_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [2:0] c2_lo_lo = {c2_lo_lo_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c2_lo_hi = {c2_lo_hi_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c2_lo = {c2_lo_hi, c2_lo_lo}; // @[Arithmetic.scala:118:18]
wire [2:0] c2_hi_lo = {c2_hi_lo_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c2_hi_hi = {c2_hi_hi_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c2_hi = {c2_hi_hi, c2_hi_lo}; // @[Arithmetic.scala:118:18]
wire [11:0] _c2_T = {c2_hi, c2_lo}; // @[Arithmetic.scala:118:18]
wire [31:0] _c2_T_1 = {_c2_T, c2_lo_1}; // @[Arithmetic.scala:118:{14,18}]
wire [31:0] _c2_T_2 = _c2_T_1; // @[Arithmetic.scala:118:{14,61}]
wire [31:0] _c2_WIRE = _c2_T_2; // @[Arithmetic.scala:118:61]
wire [31:0] _mac_unit_io_in_b_T_5 = _mac_unit_io_in_b_T_4; // @[PE.scala:121:38]
wire [7:0] _mac_unit_io_in_b_WIRE_2 = _mac_unit_io_in_b_T_5[7:0]; // @[PE.scala:121:38]
wire [31:0] _mac_unit_io_in_b_T_7 = _mac_unit_io_in_b_T_6; // @[PE.scala:127:38]
wire [7:0] _mac_unit_io_in_b_WIRE_3 = _mac_unit_io_in_b_T_7[7:0]; // @[PE.scala:127:38]
assign io_out_c_0 = io_in_control_dataflow_0 ? (io_in_control_propagate_0 ? c1[19:0] : c2[19:0]) : io_in_control_propagate_0 ? _io_out_c_T_10 : _io_out_c_T_21; // @[PE.scala:31:7, :70:15, :71:15, :102:95, :103:30, :104:16, :111:16, :118:101, :119:30, :120:16, :126:16]
assign io_out_b_0 = io_in_control_dataflow_0 ? _mac_unit_io_out_d : io_in_b_0; // @[PE.scala:31:7, :64:24, :102:95, :103:30, :118:101]
wire [19:0] _mac_unit_io_in_b_T_9 = _mac_unit_io_in_b_T_8; // @[PE.scala:137:35]
wire [7:0] _mac_unit_io_in_b_WIRE_4 = _mac_unit_io_in_b_T_9[7:0]; // @[PE.scala:137:35]
wire [31:0] _GEN_7 = {{12{io_in_d_0[19]}}, io_in_d_0}; // @[PE.scala:31:7, :124:10]
wire [31:0] _GEN_8 = {{12{_mac_unit_io_out_d[19]}}, _mac_unit_io_out_d}; // @[PE.scala:64:24, :108:10]
always @(posedge clock) begin // @[PE.scala:31:7]
if (io_in_valid_0) begin // @[PE.scala:31:7]
if (io_in_control_dataflow_0) begin // @[PE.scala:31:7]
if (io_in_control_dataflow_0 & io_in_control_propagate_0) // @[PE.scala:31:7, :70:15, :118:101, :119:30, :124:10]
c1 <= _GEN_7; // @[PE.scala:70:15, :124:10]
if (~io_in_control_dataflow_0 | io_in_control_propagate_0) begin // @[PE.scala:31:7, :71:15, :118:101, :119:30]
end
else // @[PE.scala:71:15, :118:101, :119:30]
c2 <= _GEN_7; // @[PE.scala:71:15, :124:10]
end
else begin // @[PE.scala:31:7]
c1 <= io_in_control_propagate_0 ? _c1_WIRE : _GEN_8; // @[PE.scala:31:7, :70:15, :103:30, :108:10, :109:10, :115:10]
c2 <= io_in_control_propagate_0 ? _GEN_8 : _c2_WIRE; // @[PE.scala:31:7, :71:15, :103:30, :108:10, :116:10]
end
last_s <= io_in_control_propagate_0; // @[PE.scala:31:7, :89:25]
end
end // always @(posedge)
MacUnit_114 mac_unit ( // @[PE.scala:64:24]
.clock (clock),
.reset (reset),
.io_in_a (io_in_a_0), // @[PE.scala:31:7]
.io_in_b (io_in_control_dataflow_0 ? (io_in_control_propagate_0 ? _mac_unit_io_in_b_WIRE_2 : _mac_unit_io_in_b_WIRE_3) : io_in_control_propagate_0 ? _mac_unit_io_in_b_WIRE : _mac_unit_io_in_b_WIRE_1), // @[PE.scala:31:7, :102:95, :103:30, :106:{24,37}, :113:{24,37}, :118:101, :119:30, :121:{24,38}, :127:{24,38}]
.io_in_c (io_in_control_dataflow_0 ? {{12{io_in_b_0[19]}}, io_in_b_0} : io_in_control_propagate_0 ? c2 : c1), // @[PE.scala:31:7, :70:15, :71:15, :102:95, :103:30, :107:24, :114:24, :118:101, :122:24]
.io_out_d (_mac_unit_io_out_d)
); // @[PE.scala:64:24]
assign io_out_a = io_out_a_0; // @[PE.scala:31:7]
assign io_out_b = io_out_b_0; // @[PE.scala:31:7]
assign io_out_c = io_out_c_0; // @[PE.scala:31:7]
assign io_out_control_dataflow = io_out_control_dataflow_0; // @[PE.scala:31:7]
assign io_out_control_propagate = io_out_control_propagate_0; // @[PE.scala:31:7]
assign io_out_control_shift = io_out_control_shift_0; // @[PE.scala:31:7]
assign io_out_id = io_out_id_0; // @[PE.scala:31:7]
assign io_out_last = io_out_last_0; // @[PE.scala:31:7]
assign io_out_valid = io_out_valid_0; // @[PE.scala:31:7]
assign io_bad_dataflow = io_bad_dataflow_0; // @[PE.scala:31:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File UnsafeAXI4ToTL.scala:
package ara
import chisel3._
import chisel3.util._
import freechips.rocketchip.amba._
import freechips.rocketchip.amba.axi4._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
class ReorderData(val dataWidth: Int, val respWidth: Int, val userFields: Seq[BundleFieldBase]) extends Bundle {
val data = UInt(dataWidth.W)
val resp = UInt(respWidth.W)
val last = Bool()
val user = BundleMap(userFields)
}
/** Parameters for [[BaseReservableListBuffer]] and all child classes.
*
* @param numEntries Total number of elements that can be stored in the 'data' RAM
* @param numLists Maximum number of linked lists
* @param numBeats Maximum number of beats per entry
*/
case class ReservableListBufferParameters(numEntries: Int, numLists: Int, numBeats: Int) {
// Avoid zero-width wires when we call 'log2Ceil'
val entryBits = if (numEntries == 1) 1 else log2Ceil(numEntries)
val listBits = if (numLists == 1) 1 else log2Ceil(numLists)
val beatBits = if (numBeats == 1) 1 else log2Ceil(numBeats)
}
case class UnsafeAXI4ToTLNode(numTlTxns: Int, wcorrupt: Boolean)(implicit valName: ValName)
extends MixedAdapterNode(AXI4Imp, TLImp)(
dFn = { case mp =>
TLMasterPortParameters.v2(
masters = mp.masters.zipWithIndex.map { case (m, i) =>
// Support 'numTlTxns' read requests and 'numTlTxns' write requests at once.
val numSourceIds = numTlTxns * 2
TLMasterParameters.v2(
name = m.name,
sourceId = IdRange(i * numSourceIds, (i + 1) * numSourceIds),
nodePath = m.nodePath
)
},
echoFields = mp.echoFields,
requestFields = AMBAProtField() +: mp.requestFields,
responseKeys = mp.responseKeys
)
},
uFn = { mp =>
AXI4SlavePortParameters(
slaves = mp.managers.map { m =>
val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits))
AXI4SlaveParameters(
address = m.address,
resources = m.resources,
regionType = m.regionType,
executable = m.executable,
nodePath = m.nodePath,
supportsWrite = m.supportsPutPartial.intersect(maxXfer),
supportsRead = m.supportsGet.intersect(maxXfer),
interleavedId = Some(0) // TL2 never interleaves D beats
)
},
beatBytes = mp.beatBytes,
minLatency = mp.minLatency,
responseFields = mp.responseFields,
requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt)
)
}
)
class UnsafeAXI4ToTL(numTlTxns: Int, wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule {
require(numTlTxns >= 1)
require(isPow2(numTlTxns), s"Number of TileLink transactions ($numTlTxns) must be a power of 2")
val node = UnsafeAXI4ToTLNode(numTlTxns, wcorrupt)
lazy val module = new LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
edgeIn.master.masters.foreach { m =>
require(m.aligned, "AXI4ToTL requires aligned requests")
}
val numIds = edgeIn.master.endId
val beatBytes = edgeOut.slave.beatBytes
val maxTransfer = edgeOut.slave.maxTransfer
val maxBeats = maxTransfer / beatBytes
// Look for an Error device to redirect bad requests
val errorDevs = edgeOut.slave.managers.filter(_.nodePath.last.lazyModule.className == "TLError")
require(!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. One must be instantiated.")
val errorDev = errorDevs.maxBy(_.maxTransfer)
val errorDevAddr = errorDev.address.head.base
require(
errorDev.supportsPutPartial.contains(maxTransfer),
s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support $maxTransfer"
)
require(
errorDev.supportsGet.contains(maxTransfer),
s"Error device supports ${errorDev.supportsGet} Get but must support $maxTransfer"
)
// All of the read-response reordering logic.
val listBufData = new ReorderData(beatBytes * 8, edgeIn.bundle.respBits, out.d.bits.user.fields)
val listBufParams = ReservableListBufferParameters(numTlTxns, numIds, maxBeats)
val listBuffer = if (numTlTxns > 1) {
Module(new ReservableListBuffer(listBufData, listBufParams))
} else {
Module(new PassthroughListBuffer(listBufData, listBufParams))
}
// To differentiate between read and write transaction IDs, we will set the MSB of the TileLink 'source' field to
// 0 for read requests and 1 for write requests.
val isReadSourceBit = 0.U(1.W)
val isWriteSourceBit = 1.U(1.W)
/* Read request logic */
val rOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val rBytes1 = in.ar.bits.bytes1()
val rSize = OH1ToUInt(rBytes1)
val rOk = edgeOut.slave.supportsGetSafe(in.ar.bits.addr, rSize)
val rId = if (numTlTxns > 1) {
Cat(isReadSourceBit, listBuffer.ioReservedIndex)
} else {
isReadSourceBit
}
val rAddr = Mux(rOk, in.ar.bits.addr, errorDevAddr.U | in.ar.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Indicates if there are still valid TileLink source IDs left to use.
val canIssueR = listBuffer.ioReserve.ready
listBuffer.ioReserve.bits := in.ar.bits.id
listBuffer.ioReserve.valid := in.ar.valid && rOut.ready
in.ar.ready := rOut.ready && canIssueR
rOut.valid := in.ar.valid && canIssueR
rOut.bits :<= edgeOut.Get(rId, rAddr, rSize)._2
rOut.bits.user :<= in.ar.bits.user
rOut.bits.user.lift(AMBAProt).foreach { rProt =>
rProt.privileged := in.ar.bits.prot(0)
rProt.secure := !in.ar.bits.prot(1)
rProt.fetch := in.ar.bits.prot(2)
rProt.bufferable := in.ar.bits.cache(0)
rProt.modifiable := in.ar.bits.cache(1)
rProt.readalloc := in.ar.bits.cache(2)
rProt.writealloc := in.ar.bits.cache(3)
}
/* Write request logic */
// Strip off the MSB, which identifies the transaction as read vs write.
val strippedResponseSourceId = if (numTlTxns > 1) {
out.d.bits.source((out.d.bits.source).getWidth - 2, 0)
} else {
// When there's only 1 TileLink transaction allowed for read/write, then this field is always 0.
0.U(1.W)
}
// Track when a write request burst is in progress.
val writeBurstBusy = RegInit(false.B)
when(in.w.fire) {
writeBurstBusy := !in.w.bits.last
}
val usedWriteIds = RegInit(0.U(numTlTxns.W))
val canIssueW = !usedWriteIds.andR
val usedWriteIdsSet = WireDefault(0.U(numTlTxns.W))
val usedWriteIdsClr = WireDefault(0.U(numTlTxns.W))
usedWriteIds := (usedWriteIds & ~usedWriteIdsClr) | usedWriteIdsSet
// Since write responses can show up in the middle of a write burst, we need to ensure the write burst ID doesn't
// change mid-burst.
val freeWriteIdOHRaw = Wire(UInt(numTlTxns.W))
val freeWriteIdOH = freeWriteIdOHRaw holdUnless !writeBurstBusy
val freeWriteIdIndex = OHToUInt(freeWriteIdOH)
freeWriteIdOHRaw := ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds
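      // Worked example of the free-ID selection above (illustrative values): with numTlTxns = 4
      // and usedWriteIds = 0b1011 (IDs 0, 1 and 3 busy), ~usedWriteIds = 0b0100 and
      // leftOR(0b0100) = 0b1100; shifting that left by one and inverting clears every bit above
      // the lowest free position, so the final AND leaves the one-hot 0b0100, i.e. write ID 2,
      // the lowest-numbered free ID. 'holdUnless' then keeps that choice stable across a burst.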
val wOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val wBytes1 = in.aw.bits.bytes1()
val wSize = OH1ToUInt(wBytes1)
val wOk = edgeOut.slave.supportsPutPartialSafe(in.aw.bits.addr, wSize)
val wId = if (numTlTxns > 1) {
Cat(isWriteSourceBit, freeWriteIdIndex)
} else {
isWriteSourceBit
}
val wAddr = Mux(wOk, in.aw.bits.addr, errorDevAddr.U | in.aw.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Here, we're taking advantage of the Irrevocable behavior of AXI4 (once 'valid' is asserted it must remain
// asserted until the handshake occurs). We will only accept W-channel beats when we have a valid AW beat, but
// the AW-channel beat won't fire until the final W-channel beat fires. So, we have stable address/size/strb
// bits during a W-channel burst.
in.aw.ready := wOut.ready && in.w.valid && in.w.bits.last && canIssueW
in.w.ready := wOut.ready && in.aw.valid && canIssueW
wOut.valid := in.aw.valid && in.w.valid && canIssueW
wOut.bits :<= edgeOut.Put(wId, wAddr, wSize, in.w.bits.data, in.w.bits.strb)._2
in.w.bits.user.lift(AMBACorrupt).foreach { wOut.bits.corrupt := _ }
wOut.bits.user :<= in.aw.bits.user
wOut.bits.user.lift(AMBAProt).foreach { wProt =>
wProt.privileged := in.aw.bits.prot(0)
wProt.secure := !in.aw.bits.prot(1)
wProt.fetch := in.aw.bits.prot(2)
wProt.bufferable := in.aw.bits.cache(0)
wProt.modifiable := in.aw.bits.cache(1)
wProt.readalloc := in.aw.bits.cache(2)
wProt.writealloc := in.aw.bits.cache(3)
}
// Merge the AXI4 read/write requests into the TL-A channel.
TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, rOut), (in.aw.bits.len, wOut))
/* Read/write response logic */
val okB = Wire(Irrevocable(new AXI4BundleB(edgeIn.bundle)))
val okR = Wire(Irrevocable(new AXI4BundleR(edgeIn.bundle)))
val dResp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY)
val dHasData = edgeOut.hasData(out.d.bits)
val (_dFirst, dLast, _dDone, dCount) = edgeOut.count(out.d)
val dNumBeats1 = edgeOut.numBeats1(out.d.bits)
      // Handle cases where the write ack arrives before the write is done
val writeEarlyAck = (UIntToOH(strippedResponseSourceId) & usedWriteIds) === 0.U
out.d.ready := Mux(dHasData, listBuffer.ioResponse.ready, okB.ready && !writeEarlyAck)
listBuffer.ioDataOut.ready := okR.ready
okR.valid := listBuffer.ioDataOut.valid
okB.valid := out.d.valid && !dHasData && !writeEarlyAck
listBuffer.ioResponse.valid := out.d.valid && dHasData
listBuffer.ioResponse.bits.index := strippedResponseSourceId
listBuffer.ioResponse.bits.data.data := out.d.bits.data
listBuffer.ioResponse.bits.data.resp := dResp
listBuffer.ioResponse.bits.data.last := dLast
listBuffer.ioResponse.bits.data.user :<= out.d.bits.user
listBuffer.ioResponse.bits.count := dCount
listBuffer.ioResponse.bits.numBeats1 := dNumBeats1
okR.bits.id := listBuffer.ioDataOut.bits.listIndex
okR.bits.data := listBuffer.ioDataOut.bits.payload.data
okR.bits.resp := listBuffer.ioDataOut.bits.payload.resp
okR.bits.last := listBuffer.ioDataOut.bits.payload.last
okR.bits.user :<= listBuffer.ioDataOut.bits.payload.user
// Upon the final beat in a write request, record a mapping from TileLink source ID to AXI write ID. Upon a write
// response, mark the write transaction as complete.
val writeIdMap = Mem(numTlTxns, UInt(log2Ceil(numIds).W))
val writeResponseId = writeIdMap.read(strippedResponseSourceId)
when(wOut.fire) {
writeIdMap.write(freeWriteIdIndex, in.aw.bits.id)
}
when(edgeOut.done(wOut)) {
usedWriteIdsSet := freeWriteIdOH
}
when(okB.fire) {
usedWriteIdsClr := UIntToOH(strippedResponseSourceId, numTlTxns)
}
okB.bits.id := writeResponseId
okB.bits.resp := dResp
okB.bits.user :<= out.d.bits.user
// AXI4 needs irrevocable behaviour
in.r <> Queue.irrevocable(okR, 1, flow = true)
in.b <> Queue.irrevocable(okB, 1, flow = true)
// Unused channels
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
/* Alignment constraints. The AXI4Fragmenter should guarantee all of these constraints. */
def checkRequest[T <: AXI4BundleA](a: IrrevocableIO[T], reqType: String): Unit = {
val lReqType = reqType.toLowerCase
when(a.valid) {
assert(a.bits.len < maxBeats.U, s"$reqType burst length (%d) must be less than $maxBeats", a.bits.len + 1.U)
// Narrow transfers and FIXED bursts must be single-beat bursts.
when(a.bits.len =/= 0.U) {
assert(
a.bits.size === log2Ceil(beatBytes).U,
s"Narrow $lReqType transfers (%d < $beatBytes bytes) can't be multi-beat bursts (%d beats)",
1.U << a.bits.size,
a.bits.len + 1.U
)
assert(
a.bits.burst =/= AXI4Parameters.BURST_FIXED,
s"Fixed $lReqType bursts can't be multi-beat bursts (%d beats)",
a.bits.len + 1.U
)
}
// Furthermore, the transfer size (a.bits.bytes1() + 1.U) must be naturally-aligned to the address (in
// particular, during both WRAP and INCR bursts), but this constraint is already checked by TileLink
// Monitors. Note that this alignment requirement means that WRAP bursts are identical to INCR bursts.
}
}
checkRequest(in.ar, "Read")
checkRequest(in.aw, "Write")
}
}
}
object UnsafeAXI4ToTL {
def apply(numTlTxns: Int = 1, wcorrupt: Boolean = true)(implicit p: Parameters) = {
val axi42tl = LazyModule(new UnsafeAXI4ToTL(numTlTxns, wcorrupt))
axi42tl.node
}
}
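// Illustrative usage sketch (the node names below are assumptions, not part of this file):
// inside a LazyModule the adapter node typically sits between an AXI4 master and a TileLink
// crossbar, with an AXI4Fragmenter upstream to provide the alignment that checkRequest()
// asserts on, e.g.:
//   tlXbar.node := UnsafeAXI4ToTL(numTlTxns = 4) := AXI4Fragmenter() := axiMaster.node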
/* ReservableListBuffer logic, and associated classes. */
class ResponsePayload[T <: Data](val data: T, val params: ReservableListBufferParameters) extends Bundle {
val index = UInt(params.entryBits.W)
val count = UInt(params.beatBits.W)
val numBeats1 = UInt(params.beatBits.W)
}
class DataOutPayload[T <: Data](val payload: T, val params: ReservableListBufferParameters) extends Bundle {
val listIndex = UInt(params.listBits.W)
}
/** Abstract base class to unify [[ReservableListBuffer]] and [[PassthroughListBuffer]]. */
abstract class BaseReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends Module {
require(params.numEntries > 0)
require(params.numLists > 0)
val ioReserve = IO(Flipped(Decoupled(UInt(params.listBits.W))))
val ioReservedIndex = IO(Output(UInt(params.entryBits.W)))
val ioResponse = IO(Flipped(Decoupled(new ResponsePayload(gen, params))))
val ioDataOut = IO(Decoupled(new DataOutPayload(gen, params)))
}
/** A modified version of 'ListBuffer' from 'sifive/block-inclusivecache-sifive'. This module forces users to reserve
* linked list entries (through the 'ioReserve' port) before writing data into those linked lists (through the
* 'ioResponse' port). Each response is tagged to indicate which linked list it is written into. The responses for a
* given linked list can come back out-of-order, but they will be read out through the 'ioDataOut' port in-order.
*
* ==Constructor==
* @param gen Chisel type of linked list data element
* @param params Other parameters
*
* ==Module IO==
* @param ioReserve Index of list to reserve a new element in
* @param ioReservedIndex Index of the entry that was reserved in the linked list, valid when 'ioReserve.fire'
* @param ioResponse Payload containing response data and linked-list-entry index
* @param ioDataOut Payload containing data read from response linked list and linked list index
*/
class ReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
val valid = RegInit(0.U(params.numLists.W))
val head = Mem(params.numLists, UInt(params.entryBits.W))
val tail = Mem(params.numLists, UInt(params.entryBits.W))
val used = RegInit(0.U(params.numEntries.W))
val next = Mem(params.numEntries, UInt(params.entryBits.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val dataMems = Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) }
val dataIsPresent = RegInit(0.U(params.numEntries.W))
val beats = Mem(params.numEntries, UInt(params.beatBits.W))
// The 'data' SRAM should be single-ported (read-or-write), since dual-ported SRAMs are significantly slower.
val dataMemReadEnable = WireDefault(false.B)
val dataMemWriteEnable = WireDefault(false.B)
assert(!(dataMemReadEnable && dataMemWriteEnable))
// 'freeOH' has a single bit set, which is the least-significant bit that is cleared in 'used'. So, it's the
// lowest-index entry in the 'data' RAM which is free.
val freeOH = Wire(UInt(params.numEntries.W))
val freeIndex = OHToUInt(freeOH)
freeOH := ~(leftOR(~used) << 1) & ~used
ioReservedIndex := freeIndex
val validSet = WireDefault(0.U(params.numLists.W))
val validClr = WireDefault(0.U(params.numLists.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
val dataIsPresentSet = WireDefault(0.U(params.numEntries.W))
val dataIsPresentClr = WireDefault(0.U(params.numEntries.W))
valid := (valid & ~validClr) | validSet
used := (used & ~usedClr) | usedSet
dataIsPresent := (dataIsPresent & ~dataIsPresentClr) | dataIsPresentSet
/* Reservation logic signals */
val reserveTail = Wire(UInt(params.entryBits.W))
val reserveIsValid = Wire(Bool())
/* Response logic signals */
val responseIndex = Wire(UInt(params.entryBits.W))
val responseListIndex = Wire(UInt(params.listBits.W))
val responseHead = Wire(UInt(params.entryBits.W))
val responseTail = Wire(UInt(params.entryBits.W))
val nextResponseHead = Wire(UInt(params.entryBits.W))
val nextDataIsPresent = Wire(Bool())
val isResponseInOrder = Wire(Bool())
val isEndOfList = Wire(Bool())
val isLastBeat = Wire(Bool())
val isLastResponseBeat = Wire(Bool())
val isLastUnwindBeat = Wire(Bool())
/* Reservation logic */
reserveTail := tail.read(ioReserve.bits)
reserveIsValid := valid(ioReserve.bits)
ioReserve.ready := !used.andR
// When we want to append-to and destroy the same linked list on the same cycle, we need to take special care that we
// actually start a new list, rather than appending to a list that's about to disappear.
val reserveResponseSameList = ioReserve.bits === responseListIndex
val appendToAndDestroyList =
ioReserve.fire && ioDataOut.fire && reserveResponseSameList && isEndOfList && isLastBeat
when(ioReserve.fire) {
validSet := UIntToOH(ioReserve.bits, params.numLists)
usedSet := freeOH
when(reserveIsValid && !appendToAndDestroyList) {
next.write(reserveTail, freeIndex)
}.otherwise {
head.write(ioReserve.bits, freeIndex)
}
tail.write(ioReserve.bits, freeIndex)
map.write(freeIndex, ioReserve.bits)
}
/* Response logic */
// The majority of the response logic (reading from and writing to the various RAMs) is common between the
// response-from-IO case (ioResponse.fire) and the response-from-unwind case (unwindDataIsValid).
// The read from the 'next' RAM should be performed at the address given by 'responseHead'. However, we only use the
// 'nextResponseHead' signal when 'isResponseInOrder' is asserted (both in the response-from-IO and
// response-from-unwind cases), which implies that 'responseHead' equals 'responseIndex'. 'responseHead' comes after
// two back-to-back RAM reads, so indexing into the 'next' RAM with 'responseIndex' is much quicker.
responseHead := head.read(responseListIndex)
responseTail := tail.read(responseListIndex)
nextResponseHead := next.read(responseIndex)
nextDataIsPresent := dataIsPresent(nextResponseHead)
// Note that when 'isEndOfList' is asserted, 'nextResponseHead' (and therefore 'nextDataIsPresent') is invalid, since
// there isn't a next element in the linked list.
isResponseInOrder := responseHead === responseIndex
isEndOfList := responseHead === responseTail
isLastResponseBeat := ioResponse.bits.count === ioResponse.bits.numBeats1
// When a response's last beat is sent to the output channel, mark it as completed. This can happen in two
// situations:
// 1. We receive an in-order response, which travels straight from 'ioResponse' to 'ioDataOut'. The 'data' SRAM
// reservation was never needed.
// 2. An entry is read out of the 'data' SRAM (within the unwind FSM).
when(ioDataOut.fire && isLastBeat) {
// Mark the reservation as no-longer-used.
usedClr := UIntToOH(responseIndex, params.numEntries)
// If the response is in-order, then we're popping an element from this linked list.
when(isEndOfList) {
// Once we pop the last element from a linked list, mark it as no-longer-present.
validClr := UIntToOH(responseListIndex, params.numLists)
}.otherwise {
// Move the linked list's head pointer to the new head pointer.
head.write(responseListIndex, nextResponseHead)
}
}
// If we get an out-of-order response, then stash it in the 'data' SRAM for later unwinding.
when(ioResponse.fire && !isResponseInOrder) {
dataMemWriteEnable := true.B
when(isLastResponseBeat) {
dataIsPresentSet := UIntToOH(ioResponse.bits.index, params.numEntries)
beats.write(ioResponse.bits.index, ioResponse.bits.numBeats1)
}
}
// Use the 'ioResponse.bits.count' index (AKA the beat number) to select which 'data' SRAM to write to.
val responseCountOH = UIntToOH(ioResponse.bits.count, params.numBeats)
(responseCountOH.asBools zip dataMems) foreach { case (select, seqMem) =>
when(select && dataMemWriteEnable) {
seqMem.write(ioResponse.bits.index, ioResponse.bits.data)
}
}
/* Response unwind logic */
// Unwind FSM state definitions
val sIdle :: sUnwinding :: Nil = Enum(2)
val unwindState = RegInit(sIdle)
val busyUnwinding = unwindState === sUnwinding
val startUnwind = Wire(Bool())
val stopUnwind = Wire(Bool())
when(startUnwind) {
unwindState := sUnwinding
}.elsewhen(stopUnwind) {
unwindState := sIdle
}
assert(!(startUnwind && stopUnwind))
// Start the unwind FSM when there is an old out-of-order response stored in the 'data' SRAM that is now about to
// become the next in-order response. As noted previously, when 'isEndOfList' is asserted, 'nextDataIsPresent' is
// invalid.
//
// Note that since an in-order response from 'ioResponse' to 'ioDataOut' starts the unwind FSM, we don't have to
// worry about overwriting the 'data' SRAM's output when we start the unwind FSM.
startUnwind := ioResponse.fire && isResponseInOrder && isLastResponseBeat && !isEndOfList && nextDataIsPresent
// Stop the unwind FSM when the output channel consumes the final beat of an element from the unwind FSM, and one of
// two things happens:
// 1. We're still waiting for the next in-order response for this list (!nextDataIsPresent)
// 2. There are no more outstanding responses in this list (isEndOfList)
//
// Including 'busyUnwinding' ensures this is a single-cycle pulse, and it never fires while in-order transactions are
// passing from 'ioResponse' to 'ioDataOut'.
stopUnwind := busyUnwinding && ioDataOut.fire && isLastUnwindBeat && (!nextDataIsPresent || isEndOfList)
val isUnwindBurstOver = Wire(Bool())
val startNewBurst = startUnwind || (isUnwindBurstOver && dataMemReadEnable)
// Track the number of beats left to unwind for each list entry. At the start of a new burst, we flop the number of
// beats in this burst (minus 1) into 'unwindBeats1', and we reset the 'beatCounter' counter. With each beat, we
// increment 'beatCounter' until it reaches 'unwindBeats1'.
val unwindBeats1 = Reg(UInt(params.beatBits.W))
val nextBeatCounter = Wire(UInt(params.beatBits.W))
val beatCounter = RegNext(nextBeatCounter)
isUnwindBurstOver := beatCounter === unwindBeats1
when(startNewBurst) {
unwindBeats1 := beats.read(nextResponseHead)
nextBeatCounter := 0.U
}.elsewhen(dataMemReadEnable) {
nextBeatCounter := beatCounter + 1.U
}.otherwise {
nextBeatCounter := beatCounter
}
// When unwinding, feed the next linked-list head pointer (read out of the 'next' RAM) back so we can unwind the next
// entry in this linked list. Only update the pointer when we're actually moving to the next 'data' SRAM entry (which
// happens at the start of reading a new stored burst).
val unwindResponseIndex = RegEnable(nextResponseHead, startNewBurst)
responseIndex := Mux(busyUnwinding, unwindResponseIndex, ioResponse.bits.index)
// Hold 'nextResponseHead' static while we're in the middle of unwinding a multi-beat burst entry. We don't want the
// SRAM read address to shift while reading beats from a burst. Note that this is identical to 'nextResponseHead
// holdUnless startNewBurst', but 'unwindResponseIndex' already implements the 'RegEnable' signal in 'holdUnless'.
val unwindReadAddress = Mux(startNewBurst, nextResponseHead, unwindResponseIndex)
// The 'data' SRAM's output is valid if we read from the SRAM on the previous cycle. The SRAM's output stays valid
// until it is consumed by the output channel (and if we don't read from the SRAM again on that same cycle).
val unwindDataIsValid = RegInit(false.B)
when(dataMemReadEnable) {
unwindDataIsValid := true.B
}.elsewhen(ioDataOut.fire) {
unwindDataIsValid := false.B
}
isLastUnwindBeat := isUnwindBurstOver && unwindDataIsValid
// Indicates if this is the last beat for both 'ioResponse'-to-'ioDataOut' and unwind-to-'ioDataOut' beats.
isLastBeat := Mux(busyUnwinding, isLastUnwindBeat, isLastResponseBeat)
// Select which SRAM to read from based on the beat counter.
val dataOutputVec = Wire(Vec(params.numBeats, gen))
val nextBeatCounterOH = UIntToOH(nextBeatCounter, params.numBeats)
(nextBeatCounterOH.asBools zip dataMems).zipWithIndex foreach { case ((select, seqMem), i) =>
dataOutputVec(i) := seqMem.read(unwindReadAddress, select && dataMemReadEnable)
}
// Select the current 'data' SRAM output beat, and save the output in a register in case we're being back-pressured
// by 'ioDataOut'. This implements the functionality of 'readAndHold', but only on the single SRAM we're reading
// from.
val dataOutput = dataOutputVec(beatCounter) holdUnless RegNext(dataMemReadEnable)
// Mark 'data' burst entries as no-longer-present as they get read out of the SRAM.
when(dataMemReadEnable) {
dataIsPresentClr := UIntToOH(unwindReadAddress, params.numEntries)
}
// As noted above, when starting the unwind FSM, we know the 'data' SRAM's output isn't valid, so it's safe to issue
// a read command. Otherwise, only issue an SRAM read when the next 'unwindState' is 'sUnwinding', and if we know
// we're not going to overwrite the SRAM's current output (the SRAM output is already valid, and it's not going to be
// consumed by the output channel).
val dontReadFromDataMem = unwindDataIsValid && !ioDataOut.ready
dataMemReadEnable := startUnwind || (busyUnwinding && !stopUnwind && !dontReadFromDataMem)
// While unwinding, prevent new reservations from overwriting the current 'map' entry that we're using. We need
// 'responseListIndex' to be coherent for the entire unwind process.
val rawResponseListIndex = map.read(responseIndex)
val unwindResponseListIndex = RegEnable(rawResponseListIndex, startNewBurst)
responseListIndex := Mux(busyUnwinding, unwindResponseListIndex, rawResponseListIndex)
// Accept responses either when they can be passed through to the output channel, or if they're out-of-order and are
// just going to be stashed in the 'data' SRAM. Never accept a response payload when we're busy unwinding, since that
// could result in reading from and writing to the 'data' SRAM in the same cycle, and we want that SRAM to be
// single-ported.
ioResponse.ready := (ioDataOut.ready || !isResponseInOrder) && !busyUnwinding
// Either pass an in-order response to the output channel, or data read from the unwind FSM.
ioDataOut.valid := Mux(busyUnwinding, unwindDataIsValid, ioResponse.valid && isResponseInOrder)
ioDataOut.bits.listIndex := responseListIndex
ioDataOut.bits.payload := Mux(busyUnwinding, dataOutput, ioResponse.bits.data)
// It's an error to get a response that isn't associated with a valid linked list.
when(ioResponse.fire || unwindDataIsValid) {
assert(
valid(responseListIndex),
"No linked list exists at index %d, mapped from %d",
responseListIndex,
responseIndex
)
}
when(busyUnwinding && dataMemReadEnable) {
assert(isResponseInOrder, "Unwind FSM must read entries from SRAM in order")
}
}
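// Behavioral walk-through (illustrative, values assumed): suppose list 3 reserves two entries
// back-to-back and ioReservedIndex hands out data-RAM indices 0 and 1. If the response tagged
// index 1 arrives first, isResponseInOrder is false (the list head still points at index 0),
// so its beats are stashed in the 'data' SRAM and dataIsPresent(1) is set. When the response
// for index 0 later arrives it streams straight to ioDataOut, and on its last beat startUnwind
// fires (nextDataIsPresent is set), so the unwind FSM reads index 1 back out of the SRAM and
// delivers it in order.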
/** Specialized version of [[ReservableListBuffer]] for the case of numEntries == 1.
*
* Much of the complex logic in [[ReservableListBuffer]] can disappear in this case. For instance, we don't have to
* reorder any responses, or store any linked lists.
*/
class PassthroughListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
require(params.numEntries == 1, s"PassthroughListBuffer is only valid when 'numEntries' (${params.numEntries}) is 1")
val used = RegInit(0.U(params.numEntries.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
used := (used & ~usedClr) | usedSet
ioReserve.ready := used === 0.U
  // Store which list index was reserved; we need to return this value when we get a response.
when(ioReserve.fire) {
usedSet := 1.U
map.write(0.U, ioReserve.bits)
}
// There's only one valid linked list entry, which is at index 0.
ioReservedIndex := 0.U
val isLastResponseBeat = ioResponse.bits.count === ioResponse.bits.numBeats1
// Mark the linked list as empty when we get the last beat in a response.
// Note that 'ioResponse.fire === ioDataOut.fire'.
when(ioResponse.fire && isLastResponseBeat) {
usedClr := 1.U
}
// Always pass the response data straight through, since we never need to reorder the response data.
ioDataOut.bits.listIndex := map.read(0.U)
ioDataOut.bits.payload := ioResponse.bits.data
ioDataOut.valid := ioResponse.valid
ioResponse.ready := ioDataOut.ready
}
| module dataMems_319( // @[UnsafeAXI4ToTL.scala:365:62]
input [4:0] R0_addr,
input R0_en,
input R0_clk,
output [66:0] R0_data,
input [4:0] W0_addr,
input W0_en,
input W0_clk,
input [66:0] W0_data
);
dataMems_0_ext dataMems_0_ext ( // @[UnsafeAXI4ToTL.scala:365:62]
.R0_addr (R0_addr),
.R0_en (R0_en),
.R0_clk (R0_clk),
.R0_data (R0_data),
.W0_addr (W0_addr),
.W0_en (W0_en),
.W0_clk (W0_clk),
.W0_data (W0_data)
); // @[UnsafeAXI4ToTL.scala:365:62]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftRegisterPriorityQueue.scala:
package compressacc
import chisel3._
import chisel3.util._
import chisel3.util._
// TODO : support enq & deq at the same cycle
class PriorityQueueStageIO(keyWidth: Int, value: ValueInfo) extends Bundle {
val output_prev = KeyValue(keyWidth, value)
val output_nxt = KeyValue(keyWidth, value)
val input_prev = Flipped(KeyValue(keyWidth, value))
val input_nxt = Flipped(KeyValue(keyWidth, value))
val cmd = Flipped(Valid(UInt(1.W)))
val insert_here = Input(Bool())
val cur_input_keyval = Flipped(KeyValue(keyWidth, value))
val cur_output_keyval = KeyValue(keyWidth, value)
}
class PriorityQueueStage(keyWidth: Int, value: ValueInfo) extends Module {
val io = IO(new PriorityQueueStageIO(keyWidth, value))
dontTouch(io)
val CMD_DEQ = 0.U
val CMD_ENQ = 1.U
val MAX_VALUE = (1 << keyWidth) - 1
val key_reg = RegInit(MAX_VALUE.U(keyWidth.W))
val value_reg = Reg(value)
io.output_prev.key := key_reg
io.output_prev.value := value_reg
io.output_nxt.key := key_reg
io.output_nxt.value := value_reg
io.cur_output_keyval.key := key_reg
io.cur_output_keyval.value := value_reg
when (io.cmd.valid) {
switch (io.cmd.bits) {
is (CMD_DEQ) {
key_reg := io.input_nxt.key
value_reg := io.input_nxt.value
}
is (CMD_ENQ) {
when (io.insert_here) {
key_reg := io.cur_input_keyval.key
value_reg := io.cur_input_keyval.value
} .elsewhen (key_reg >= io.cur_input_keyval.key) {
key_reg := io.input_prev.key
value_reg := io.input_prev.value
} .otherwise {
// do nothing
}
}
}
}
}
object PriorityQueueStage {
def apply(keyWidth: Int, v: ValueInfo): PriorityQueueStage = new PriorityQueueStage(keyWidth, v)
}
// TODO
// - This design is not scalable, as the enqueued key/value is broadcast to all the stages
// - Add pipeline registers later
class PriorityQueueIO(queSize: Int, keyWidth: Int, value: ValueInfo) extends Bundle {
val cnt_bits = log2Ceil(queSize+1)
val counter = Output(UInt(cnt_bits.W))
val enq = Flipped(Decoupled(KeyValue(keyWidth, value)))
val deq = Decoupled(KeyValue(keyWidth, value))
}
class PriorityQueue(queSize: Int, keyWidth: Int, value: ValueInfo) extends Module {
val keyWidthInternal = keyWidth + 1
val CMD_DEQ = 0.U
val CMD_ENQ = 1.U
val io = IO(new PriorityQueueIO(queSize, keyWidthInternal, value))
dontTouch(io)
val MAX_VALUE = ((1 << keyWidthInternal) - 1).U
val cnt_bits = log2Ceil(queSize+1)
  // do not consider cases where we are inserting more entries than the queSize
val counter = RegInit(0.U(cnt_bits.W))
io.counter := counter
val full = (counter === queSize.U)
val empty = (counter === 0.U)
io.deq.valid := !empty
io.enq.ready := !full
when (io.enq.fire) {
counter := counter + 1.U
}
when (io.deq.fire) {
counter := counter - 1.U
}
val cmd_valid = io.enq.valid || io.deq.ready
val cmd = Mux(io.enq.valid, CMD_ENQ, CMD_DEQ)
assert(!(io.enq.valid && io.deq.ready))
val stages = Seq.fill(queSize)(Module(new PriorityQueueStage(keyWidthInternal, value)))
for (i <- 0 until (queSize - 1)) {
stages(i+1).io.input_prev <> stages(i).io.output_nxt
stages(i).io.input_nxt <> stages(i+1).io.output_prev
}
stages(queSize-1).io.input_nxt.key := MAX_VALUE
// stages(queSize-1).io.input_nxt.value :=
stages(queSize-1).io.input_nxt.value.symbol := 0.U
// stages(queSize-1).io.input_nxt.value.child(0) := 0.U
// stages(queSize-1).io.input_nxt.value.child(1) := 0.U
stages(0).io.input_prev.key := io.enq.bits.key
stages(0).io.input_prev.value <> io.enq.bits.value
for (i <- 0 until queSize) {
stages(i).io.cmd.valid := cmd_valid
stages(i).io.cmd.bits := cmd
stages(i).io.cur_input_keyval <> io.enq.bits
}
val is_large_or_equal = WireInit(VecInit(Seq.fill(queSize)(false.B)))
for (i <- 0 until queSize) {
is_large_or_equal(i) := (stages(i).io.cur_output_keyval.key >= io.enq.bits.key)
}
val is_large_or_equal_cat = Wire(UInt(queSize.W))
is_large_or_equal_cat := Cat(is_large_or_equal.reverse)
val insert_here_idx = PriorityEncoder(is_large_or_equal_cat)
for (i <- 0 until queSize) {
when (i.U === insert_here_idx) {
stages(i).io.insert_here := true.B
} .otherwise {
stages(i).io.insert_here := false.B
}
}
io.deq.bits <> stages(0).io.output_prev
}
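// Behavioral sketch (illustrative keys, not part of the original source): the stages form a
// sorted shift register with the smallest key at stage 0. Enqueueing 5, then 2, then 7 leaves
// the stage keys as [5, MAX, ...], then [2, 5, MAX, ...] (2 inserts at stage 0 while 5 shifts
// toward the tail), then [2, 5, 7, MAX, ...]. A dequeue pops stage 0 (key 2) and every stage
// reloads from its tail-side neighbour, giving [5, 7, MAX, ...].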
| module PriorityQueueStage_9( // @[ShiftRegisterPriorityQueue.scala:21:7]
input clock, // @[ShiftRegisterPriorityQueue.scala:21:7]
input reset, // @[ShiftRegisterPriorityQueue.scala:21:7]
output [30:0] io_output_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [9:0] io_output_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [30:0] io_output_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [9:0] io_output_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [30:0] io_input_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [9:0] io_input_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [30:0] io_input_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [9:0] io_input_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
input io_cmd_valid, // @[ShiftRegisterPriorityQueue.scala:22:14]
input io_cmd_bits, // @[ShiftRegisterPriorityQueue.scala:22:14]
input io_insert_here, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [30:0] io_cur_input_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [9:0] io_cur_input_keyval_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [30:0] io_cur_output_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [9:0] io_cur_output_keyval_value_symbol // @[ShiftRegisterPriorityQueue.scala:22:14]
);
wire [30:0] io_input_prev_key_0 = io_input_prev_key; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_input_prev_value_symbol_0 = io_input_prev_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_input_nxt_key_0 = io_input_nxt_key; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_input_nxt_value_symbol_0 = io_input_nxt_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire io_cmd_valid_0 = io_cmd_valid; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire io_cmd_bits_0 = io_cmd_bits; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire io_insert_here_0 = io_insert_here; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_cur_input_keyval_key_0 = io_cur_input_keyval_key; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_cur_input_keyval_value_symbol_0 = io_cur_input_keyval_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
reg [30:0] key_reg; // @[ShiftRegisterPriorityQueue.scala:30:24]
assign io_output_prev_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
assign io_output_nxt_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
assign io_cur_output_keyval_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
reg [9:0] value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:31:22]
assign io_output_prev_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
assign io_output_nxt_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
assign io_cur_output_keyval_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
wire _T_2 = key_reg >= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24, :52:30]
always @(posedge clock) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (reset) // @[ShiftRegisterPriorityQueue.scala:21:7]
key_reg <= 31'h7FFFFFFF; // @[ShiftRegisterPriorityQueue.scala:30:24]
else if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7]
key_reg <= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30]
key_reg <= io_input_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
end
else // @[ShiftRegisterPriorityQueue.scala:21:7]
key_reg <= io_input_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
end
if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7]
value_reg_symbol <= io_cur_input_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30]
value_reg_symbol <= io_input_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
end
else // @[ShiftRegisterPriorityQueue.scala:21:7]
value_reg_symbol <= io_input_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
end
  end // always @(posedge)
assign io_output_prev_key = io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_output_prev_value_symbol = io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_output_nxt_key = io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_output_nxt_value_symbol = io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_cur_output_keyval_key = io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_cur_output_keyval_value_symbol = io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
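// Worked example (illustrative): Str("OK") packs the ASCII bytes 0x4F and 0x4B into the
// 16-bit literal 0x4F4B.U(16.W), and Str('!') is 0x21.U(8.W). The UInt/SInt overloads instead
// elaborate hardware that renders a runtime value as ASCII digits (radix 10 by default),
// which is handy for printf-style debug output.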
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
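// Worked example (illustrative): PopCountAtLeast("b1010".U, 2) is true.B (two bits set) while
// PopCountAtLeast("b1000".U, 2) is false.B; for n <= 2 the private 'two' helper builds a
// shallow OR/AND tree instead of a full PopCount adder tree.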
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
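    // Worked example (illustrative): with n = 6 (not a power of two), 5.U.addWrap(4.U, 6)
    // computes 9, sees 9 >= 6 and returns 3.U, while 1.U.subWrap(4.U, 6) underflows (the
    // widened MSB is set) and returns 3.U after adding n back; for power-of-two n both
    // reduce to simple truncation.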
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
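  // Worked example of the OH1 ("one-hot minus one") encoding (illustrative):
  // UIntToOH1(3.U, 8) = 0b00000111, OH1ToUInt(0b0111.U) = 3.U and OH1ToOH(0b0111.U) = 0b1000.U.
  // The AXI4 adapter earlier in this document converts bytes1() masks to TileLink log2 sizes
  // with OH1ToUInt in exactly this way.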
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
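  // Worked example (illustrative): leftOR("b00100".U, 5) = 0b11100 (each set bit smears toward
  // the MSB) while rightOR("b00100".U, 5) = 0b00111 (it smears toward the LSB); the 'cap'
  // argument can bound how far the fill propagates to keep the OR tree shallow.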
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File SourceD.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
import TLMessages._
import TLAtomics._
import TLPermissions._
class SourceDRequest(params: InclusiveCacheParameters) extends FullRequest(params)
{
val sink = UInt(params.inner.bundle.sinkBits.W)
val way = UInt(params.wayBits.W)
val bad = Bool()
}
class SourceDHazard(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val set = UInt(params.setBits.W)
val way = UInt(params.wayBits.W)
}
class PutBufferACEntry(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val data = UInt(params.inner.bundle.dataBits.W)
val mask = UInt((params.inner.bundle.dataBits/8).W)
val corrupt = Bool()
}
class SourceD(params: InclusiveCacheParameters) extends Module
{
val io = IO(new Bundle {
val req = Flipped(Decoupled(new SourceDRequest(params)))
val d = Decoupled(new TLBundleD(params.inner.bundle))
// Put data from SinkA
val pb_pop = Decoupled(new PutBufferPop(params))
val pb_beat = Flipped(new PutBufferAEntry(params))
// Release data from SinkC
val rel_pop = Decoupled(new PutBufferPop(params))
val rel_beat = Flipped(new PutBufferCEntry(params))
// Access to the BankedStore
val bs_radr = Decoupled(new BankedStoreInnerAddress(params))
val bs_rdat = Flipped(new BankedStoreInnerDecoded(params))
val bs_wadr = Decoupled(new BankedStoreInnerAddress(params))
val bs_wdat = new BankedStoreInnerPoison(params)
// Is it safe to evict/replace this way?
val evict_req = Flipped(new SourceDHazard(params))
val evict_safe = Bool()
val grant_req = Flipped(new SourceDHazard(params))
val grant_safe = Bool()
})
val beatBytes = params.inner.manager.beatBytes
val writeBytes = params.micro.writeBytes
val s1_valid = Wire(Bool())
val s2_valid = Wire(Bool())
val s3_valid = Wire(Bool())
val s2_ready = Wire(Bool())
val s3_ready = Wire(Bool())
val s4_ready = Wire(Bool())
////////////////////////////////////// STAGE 1 //////////////////////////////////////
// Reform the request beats
val busy = RegInit(false.B)
val s1_block_r = RegInit(false.B)
val s1_counter = RegInit(0.U(params.innerBeatBits.W))
val s1_req_reg = RegEnable(io.req.bits, !busy && io.req.valid)
val s1_req = Mux(!busy, io.req.bits, s1_req_reg)
val s1_x_bypass = Wire(UInt((beatBytes/writeBytes).W)) // might go from high=>low during stall
val s1_latch_bypass = RegNext(!(busy || io.req.valid) || s2_ready)
val s1_bypass = Mux(s1_latch_bypass, s1_x_bypass, RegEnable(s1_x_bypass, s1_latch_bypass))
val s1_mask = MaskGen(s1_req.offset, s1_req.size, beatBytes, writeBytes) & ~s1_bypass
val s1_grant = (s1_req.opcode === AcquireBlock && s1_req.param === BtoT) || s1_req.opcode === AcquirePerm
val s1_need_r = s1_mask.orR && s1_req.prio(0) && s1_req.opcode =/= Hint && !s1_grant &&
(s1_req.opcode =/= PutFullData || s1_req.size < log2Ceil(writeBytes).U )
val s1_valid_r = (busy || io.req.valid) && s1_need_r && !s1_block_r
val s1_need_pb = Mux(s1_req.prio(0), !s1_req.opcode(2), s1_req.opcode(0)) // hasData
val s1_single = Mux(s1_req.prio(0), s1_req.opcode === Hint || s1_grant, s1_req.opcode === Release)
val s1_retires = !s1_single // retire all operations with data in s3 for bypass (saves energy)
// Alternatively: val s1_retires = s1_need_pb // retire only updates for bypass (less backpressure from WB)
val s1_beats1 = Mux(s1_single, 0.U, UIntToOH1(s1_req.size, log2Up(params.cache.blockBytes)) >> log2Ceil(beatBytes))
val s1_beat = (s1_req.offset >> log2Ceil(beatBytes)) | s1_counter
val s1_last = s1_counter === s1_beats1
val s1_first = s1_counter === 0.U
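  // Worked example (illustrative; assumes blockBytes = 64 and beatBytes = 8): a multi-beat
  // request of size 5 (32 bytes) gives s1_beats1 = UIntToOH1(5, 6) >> 3 = 0b011111 >> 3 = 3,
  // i.e. four beats, while a 'single' response (a Hint, a data-less Grant, or a ReleaseAck)
  // forces s1_beats1 = 0; s1_beat then offsets the beat counter by the request's starting
  // beat within the block.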
params.ccover(s1_block_r, "SOURCED_1_SRAM_HOLD", "SRAM read-out successful, but stalled by stage 2")
params.ccover(!s1_latch_bypass, "SOURCED_1_BYPASS_HOLD", "Bypass match successful, but stalled by stage 2")
params.ccover((busy || io.req.valid) && !s1_need_r, "SOURCED_1_NO_MODIFY", "Transaction servicable without SRAM")
io.bs_radr.valid := s1_valid_r
io.bs_radr.bits.noop := false.B
io.bs_radr.bits.way := s1_req.way
io.bs_radr.bits.set := s1_req.set
io.bs_radr.bits.beat := s1_beat
io.bs_radr.bits.mask := s1_mask
params.ccover(io.bs_radr.valid && !io.bs_radr.ready, "SOURCED_1_READ_STALL", "Data readout stalled")
// Make a queue to catch BS readout during stalls
val queue = Module(new Queue(chiselTypeOf(io.bs_rdat), 3, flow=true))
queue.io.enq.valid := RegNext(RegNext(io.bs_radr.fire))
queue.io.enq.bits := io.bs_rdat
assert (!queue.io.enq.valid || queue.io.enq.ready)
params.ccover(!queue.io.enq.ready, "SOURCED_1_QUEUE_FULL", "Filled SRAM skidpad queue completely")
when (io.bs_radr.fire) { s1_block_r := true.B }
when (io.req.valid) { busy := true.B }
when (s1_valid && s2_ready) {
s1_counter := s1_counter + 1.U
s1_block_r := false.B
when (s1_last) {
s1_counter := 0.U
busy := false.B
}
}
params.ccover(s1_valid && !s2_ready, "SOURCED_1_STALL", "Stage 1 pipeline blocked")
io.req.ready := !busy
s1_valid := (busy || io.req.valid) && (!s1_valid_r || io.bs_radr.ready)
////////////////////////////////////// STAGE 2 //////////////////////////////////////
// Fetch the request data
val s2_latch = s1_valid && s2_ready
val s2_full = RegInit(false.B)
val s2_valid_pb = RegInit(false.B)
val s2_beat = RegEnable(s1_beat, s2_latch)
val s2_bypass = RegEnable(s1_bypass, s2_latch)
val s2_req = RegEnable(s1_req, s2_latch)
val s2_last = RegEnable(s1_last, s2_latch)
val s2_need_r = RegEnable(s1_need_r, s2_latch)
val s2_need_pb = RegEnable(s1_need_pb, s2_latch)
val s2_retires = RegEnable(s1_retires, s2_latch)
val s2_need_d = RegEnable(!s1_need_pb || s1_first, s2_latch)
val s2_pdata_raw = Wire(new PutBufferACEntry(params))
val s2_pdata = s2_pdata_raw holdUnless s2_valid_pb
s2_pdata_raw.data := Mux(s2_req.prio(0), io.pb_beat.data, io.rel_beat.data)
s2_pdata_raw.mask := Mux(s2_req.prio(0), io.pb_beat.mask, ~0.U(params.inner.manager.beatBytes.W))
s2_pdata_raw.corrupt := Mux(s2_req.prio(0), io.pb_beat.corrupt, io.rel_beat.corrupt)
io.pb_pop.valid := s2_valid_pb && s2_req.prio(0)
io.pb_pop.bits.index := s2_req.put
io.pb_pop.bits.last := s2_last
io.rel_pop.valid := s2_valid_pb && !s2_req.prio(0)
io.rel_pop.bits.index := s2_req.put
io.rel_pop.bits.last := s2_last
params.ccover(io.pb_pop.valid && !io.pb_pop.ready, "SOURCED_2_PUTA_STALL", "Channel A put buffer was not ready in time")
if (!params.firstLevel)
params.ccover(io.rel_pop.valid && !io.rel_pop.ready, "SOURCED_2_PUTC_STALL", "Channel C put buffer was not ready in time")
val pb_ready = Mux(s2_req.prio(0), io.pb_pop.ready, io.rel_pop.ready)
when (pb_ready) { s2_valid_pb := false.B }
when (s2_valid && s3_ready) { s2_full := false.B }
when (s2_latch) { s2_valid_pb := s1_need_pb }
when (s2_latch) { s2_full := true.B }
params.ccover(s2_valid && !s3_ready, "SOURCED_2_STALL", "Stage 2 pipeline blocked")
s2_valid := s2_full && (!s2_valid_pb || pb_ready)
s2_ready := !s2_full || (s3_ready && (!s2_valid_pb || pb_ready))
////////////////////////////////////// STAGE 3 //////////////////////////////////////
// Send D response
val s3_latch = s2_valid && s3_ready
val s3_full = RegInit(false.B)
val s3_valid_d = RegInit(false.B)
val s3_beat = RegEnable(s2_beat, s3_latch)
val s3_bypass = RegEnable(s2_bypass, s3_latch)
val s3_req = RegEnable(s2_req, s3_latch)
val s3_adjusted_opcode = Mux(s3_req.bad, Get, s3_req.opcode) // kill update when denied
val s3_last = RegEnable(s2_last, s3_latch)
val s3_pdata = RegEnable(s2_pdata, s3_latch)
val s3_need_pb = RegEnable(s2_need_pb, s3_latch)
val s3_retires = RegEnable(s2_retires, s3_latch)
val s3_need_r = RegEnable(s2_need_r, s3_latch)
val s3_need_bs = s3_need_pb
val s3_acq = s3_req.opcode === AcquireBlock || s3_req.opcode === AcquirePerm
// Collect s3's data from either the BankedStore or bypass
// NOTE: we use the s3_bypass passed down from s1_bypass, because s2-s4 were guarded by the hazard checks and not stale
val s3_bypass_data = Wire(UInt())
def chunk(x: UInt): Seq[UInt] = Seq.tabulate(beatBytes/writeBytes) { i => x((i+1)*writeBytes*8-1, i*writeBytes*8) }
def chop (x: UInt): Seq[Bool] = Seq.tabulate(beatBytes/writeBytes) { i => x(i) }
def bypass(sel: UInt, x: UInt, y: UInt) =
(chop(sel) zip (chunk(x) zip chunk(y))) .map { case (s, (x, y)) => Mux(s, x, y) } .asUInt
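// chunk/chop split a beat into writeBytes-sized pieces; bypass then muxes, piece by piece,
// between freshly written data (x) and the SRAM/queue readout (y) according to the select mask.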
val s3_rdata = bypass(s3_bypass, s3_bypass_data, queue.io.deq.bits.data)
// Lookup table for response codes
val grant = Mux(s3_req.param === BtoT, Grant, GrantData)
val resp_opcode = VecInit(Seq(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, grant, Grant))
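// resp_opcode is indexed by the inner A-channel opcode: PutFullData/PutPartialData -> AccessAck,
// ArithmeticData/LogicalData/Get -> AccessAckData, Hint -> HintAck,
// AcquireBlock -> Grant or GrantData, AcquirePerm -> Grant.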
// No restrictions on the type of buffer used here
val d = Wire(chiselTypeOf(io.d))
io.d <> params.micro.innerBuf.d(d)
d.valid := s3_valid_d
d.bits.opcode := Mux(s3_req.prio(0), resp_opcode(s3_req.opcode), ReleaseAck)
d.bits.param := Mux(s3_req.prio(0) && s3_acq, Mux(s3_req.param =/= NtoB, toT, toB), 0.U)
d.bits.size := s3_req.size
d.bits.source := s3_req.source
d.bits.sink := s3_req.sink
d.bits.denied := s3_req.bad
d.bits.data := s3_rdata
d.bits.corrupt := s3_req.bad && d.bits.opcode(0)
queue.io.deq.ready := s3_valid && s4_ready && s3_need_r
assert (!s3_full || !s3_need_r || queue.io.deq.valid)
when (d.ready) { s3_valid_d := false.B }
when (s3_valid && s4_ready) { s3_full := false.B }
when (s3_latch) { s3_valid_d := s2_need_d }
when (s3_latch) { s3_full := true.B }
params.ccover(s3_valid && !s4_ready, "SOURCED_3_STALL", "Stage 3 pipeline blocked")
s3_valid := s3_full && (!s3_valid_d || d.ready)
s3_ready := !s3_full || (s4_ready && (!s3_valid_d || d.ready))
////////////////////////////////////// STAGE 4 //////////////////////////////////////
// Writeback updated data
val s4_latch = s3_valid && s3_retires && s4_ready
val s4_full = RegInit(false.B)
val s4_beat = RegEnable(s3_beat, s4_latch)
val s4_need_r = RegEnable(s3_need_r, s4_latch)
val s4_need_bs = RegEnable(s3_need_bs, s4_latch)
val s4_need_pb = RegEnable(s3_need_pb, s4_latch)
val s4_req = RegEnable(s3_req, s4_latch)
val s4_adjusted_opcode = RegEnable(s3_adjusted_opcode, s4_latch)
val s4_pdata = RegEnable(s3_pdata, s4_latch)
val s4_rdata = RegEnable(s3_rdata, s4_latch)
val atomics = Module(new Atomics(params.inner.bundle))
atomics.io.write := s4_req.prio(2)
atomics.io.a.opcode := s4_adjusted_opcode
atomics.io.a.param := s4_req.param
atomics.io.a.size := 0.U
atomics.io.a.source := 0.U
atomics.io.a.address := 0.U
atomics.io.a.mask := s4_pdata.mask
atomics.io.a.data := s4_pdata.data
atomics.io.a.corrupt := DontCare
atomics.io.data_in := s4_rdata
io.bs_wadr.valid := s4_full && s4_need_bs
io.bs_wadr.bits.noop := false.B
io.bs_wadr.bits.way := s4_req.way
io.bs_wadr.bits.set := s4_req.set
io.bs_wadr.bits.beat := s4_beat
io.bs_wadr.bits.mask := Cat(s4_pdata.mask.asBools.grouped(writeBytes).map(_.reduce(_||_)).toList.reverse)
io.bs_wdat.data := atomics.io.data_out
assert (!(s4_full && s4_need_pb && s4_pdata.corrupt), "Data poisoning unsupported")
params.ccover(io.bs_wadr.valid && !io.bs_wadr.ready, "SOURCED_4_WRITEBACK_STALL", "Data writeback stalled")
params.ccover(s4_req.prio(0) && s4_req.opcode === ArithmeticData && s4_req.param === MIN, "SOURCED_4_ATOMIC_MIN", "Evaluated a signed minimum atomic")
params.ccover(s4_req.prio(0) && s4_req.opcode === ArithmeticData && s4_req.param === MAX, "SOURCED_4_ATOMIC_MAX", "Evaluated a signed maximum atomic")
params.ccover(s4_req.prio(0) && s4_req.opcode === ArithmeticData && s4_req.param === MINU, "SOURCED_4_ATOMIC_MINU", "Evaluated an unsigned minimum atomic")
params.ccover(s4_req.prio(0) && s4_req.opcode === ArithmeticData && s4_req.param === MAXU, "SOURCED_4_ATOMIC_MAXU", "Evaluated an unsigned maximum atomic")
params.ccover(s4_req.prio(0) && s4_req.opcode === ArithmeticData && s4_req.param === ADD, "SOURCED_4_ATOMIC_ADD", "Evaluated an addition atomic")
params.ccover(s4_req.prio(0) && s4_req.opcode === LogicalData && s4_req.param === XOR, "SOURCED_4_ATOMIC_XOR", "Evaluated a bitwise XOR atomic")
params.ccover(s4_req.prio(0) && s4_req.opcode === LogicalData && s4_req.param === OR, "SOURCED_4_ATOMIC_OR", "Evaluated a bitwise OR atomic")
params.ccover(s4_req.prio(0) && s4_req.opcode === LogicalData && s4_req.param === AND, "SOURCED_4_ATOMIC_AND", "Evaluated a bitwise AND atomic")
params.ccover(s4_req.prio(0) && s4_req.opcode === LogicalData && s4_req.param === SWAP, "SOURCED_4_ATOMIC_SWAP", "Evaluated a SWAP atomic")
when (io.bs_wadr.ready || !s4_need_bs) { s4_full := false.B }
when (s4_latch) { s4_full := true.B }
s4_ready := !s3_retires || !s4_full || io.bs_wadr.ready || !s4_need_bs
////////////////////////////////////// RETIRED //////////////////////////////////////
// Record for bypass the last three retired writebacks
// We need 3 slots to collect what was in s2, s3, s4 when the request was in s1
// ... you can't rely on s4 being full if bubbles got introduced between s1 and s2
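// retire fires whenever s4 drains; s5/s6/s7 then act as a small shift register holding the most
// recently retired (set, way, beat, data) writebacks that a later s1 read can be bypassed from.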
val retire = s4_full && (io.bs_wadr.ready || !s4_need_bs)
val s5_req = RegEnable(s4_req, retire)
val s5_beat = RegEnable(s4_beat, retire)
val s5_dat = RegEnable(atomics.io.data_out, retire)
val s6_req = RegEnable(s5_req, retire)
val s6_beat = RegEnable(s5_beat, retire)
val s6_dat = RegEnable(s5_dat, retire)
val s7_dat = RegEnable(s6_dat, retire)
////////////////////////////////////// BYPASS //////////////////////////////////////
// Manually retime this circuit to pull a register stage forward
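// The pre_* signals are the values the s3..s7 registers will hold on the next cycle, which lets
// the set/way/beat comparison and mask generation happen one cycle early and be registered
// (see the RegNext around pre_s3_4_bypass below).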
val pre_s3_req = Mux(s3_latch, s2_req, s3_req)
val pre_s4_req = Mux(s4_latch, s3_req, s4_req)
val pre_s5_req = Mux(retire, s4_req, s5_req)
val pre_s6_req = Mux(retire, s5_req, s6_req)
val pre_s3_beat = Mux(s3_latch, s2_beat, s3_beat)
val pre_s4_beat = Mux(s4_latch, s3_beat, s4_beat)
val pre_s5_beat = Mux(retire, s4_beat, s5_beat)
val pre_s6_beat = Mux(retire, s5_beat, s6_beat)
val pre_s5_dat = Mux(retire, atomics.io.data_out, s5_dat)
val pre_s6_dat = Mux(retire, s5_dat, s6_dat)
val pre_s7_dat = Mux(retire, s6_dat, s7_dat)
val pre_s4_full = s4_latch || (!(io.bs_wadr.ready || !s4_need_bs) && s4_full)
val pre_s3_4_match = pre_s4_req.set === pre_s3_req.set && pre_s4_req.way === pre_s3_req.way && pre_s4_beat === pre_s3_beat && pre_s4_full
val pre_s3_5_match = pre_s5_req.set === pre_s3_req.set && pre_s5_req.way === pre_s3_req.way && pre_s5_beat === pre_s3_beat
val pre_s3_6_match = pre_s6_req.set === pre_s3_req.set && pre_s6_req.way === pre_s3_req.way && pre_s6_beat === pre_s3_beat
val pre_s3_4_bypass = Mux(pre_s3_4_match, MaskGen(pre_s4_req.offset, pre_s4_req.size, beatBytes, writeBytes), 0.U)
val pre_s3_5_bypass = Mux(pre_s3_5_match, MaskGen(pre_s5_req.offset, pre_s5_req.size, beatBytes, writeBytes), 0.U)
val pre_s3_6_bypass = Mux(pre_s3_6_match, MaskGen(pre_s6_req.offset, pre_s6_req.size, beatBytes, writeBytes), 0.U)
s3_bypass_data :=
bypass(RegNext(pre_s3_4_bypass), atomics.io.data_out, RegNext(
bypass(pre_s3_5_bypass, pre_s5_dat,
bypass(pre_s3_6_bypass, pre_s6_dat,
pre_s7_dat))))
// Detect which parts of s1 will be bypassed from later pipeline stages (s2-s4)
// Note: we also bypass from reads ahead in the pipeline to save power
val s1_2_match = s2_req.set === s1_req.set && s2_req.way === s1_req.way && s2_beat === s1_beat && s2_full && s2_retires
val s1_3_match = s3_req.set === s1_req.set && s3_req.way === s1_req.way && s3_beat === s1_beat && s3_full && s3_retires
val s1_4_match = s4_req.set === s1_req.set && s4_req.way === s1_req.way && s4_beat === s1_beat && s4_full
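// (s1_4_match needs no 'retires' term: s4 only latches requests for which s3_retires was set; see s4_latch)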
for (i <- 0 until 8) {
val cover = i.U(3.W)
val s2 = s1_2_match === cover(0)
val s3 = s1_3_match === cover(1)
val s4 = s1_4_match === cover(2)
params.ccover(io.req.valid && s2 && s3 && s4, "SOURCED_BYPASS_CASE_" + i, "Bypass data from all subsets of pipeline stages")
}
val s1_2_bypass = Mux(s1_2_match, MaskGen(s2_req.offset, s2_req.size, beatBytes, writeBytes), 0.U)
val s1_3_bypass = Mux(s1_3_match, MaskGen(s3_req.offset, s3_req.size, beatBytes, writeBytes), 0.U)
val s1_4_bypass = Mux(s1_4_match, MaskGen(s4_req.offset, s4_req.size, beatBytes, writeBytes), 0.U)
s1_x_bypass := s1_2_bypass | s1_3_bypass | s1_4_bypass
////////////////////////////////////// HAZARDS //////////////////////////////////////
// SinkC, SourceC, and SinkD can never interfere with each other because their operation
// is fully contained within the execution plan of an MSHR. That MSHR owns the entire set, so
// there is no way for a data race to occur.
// However, SourceD is special. We allow it to run ahead after the MSHR and scheduler have
// released control of a set+way. This is necessary to allow single-cycle occupancy for
// hits. Thus, we need to be careful about data hazards between SourceD and the other ports
// of the BankedStore. We can at least compare against the registered 's1_req_reg', because the first
// cycle of SourceD falls within the occupancy of the MSHR's plan.
// Must ReleaseData=> be interlocked? RaW hazard
io.evict_safe :=
(!busy || io.evict_req.way =/= s1_req_reg.way || io.evict_req.set =/= s1_req_reg.set) &&
(!s2_full || io.evict_req.way =/= s2_req.way || io.evict_req.set =/= s2_req.set) &&
(!s3_full || io.evict_req.way =/= s3_req.way || io.evict_req.set =/= s3_req.set) &&
(!s4_full || io.evict_req.way =/= s4_req.way || io.evict_req.set =/= s4_req.set)
// Must =>GrantData be interlocked? WaR hazard
io.grant_safe :=
(!busy || io.grant_req.way =/= s1_req_reg.way || io.grant_req.set =/= s1_req_reg.set) &&
(!s2_full || io.grant_req.way =/= s2_req.way || io.grant_req.set =/= s2_req.set) &&
(!s3_full || io.grant_req.way =/= s3_req.way || io.grant_req.set =/= s3_req.set) &&
(!s4_full || io.grant_req.way =/= s4_req.way || io.grant_req.set =/= s4_req.set)
// SourceD cannot overlap with SinkC b/c the only way inner caches could become
// dirty such that they want to put data in via SinkC is if we Granted them permissions,
// which must flow through the SourceD pipeline.
}
| module SourceD( // @[SourceD.scala:48:7]
input clock, // @[SourceD.scala:48:7]
input reset, // @[SourceD.scala:48:7]
output io_req_ready, // @[SourceD.scala:50:14]
input io_req_valid, // @[SourceD.scala:50:14]
input io_req_bits_prio_0, // @[SourceD.scala:50:14]
input io_req_bits_prio_1, // @[SourceD.scala:50:14]
input io_req_bits_prio_2, // @[SourceD.scala:50:14]
input io_req_bits_control, // @[SourceD.scala:50:14]
input [2:0] io_req_bits_opcode, // @[SourceD.scala:50:14]
input [2:0] io_req_bits_param, // @[SourceD.scala:50:14]
input [2:0] io_req_bits_size, // @[SourceD.scala:50:14]
input [5:0] io_req_bits_source, // @[SourceD.scala:50:14]
input [12:0] io_req_bits_tag, // @[SourceD.scala:50:14]
input [5:0] io_req_bits_offset, // @[SourceD.scala:50:14]
input [5:0] io_req_bits_put, // @[SourceD.scala:50:14]
input [9:0] io_req_bits_set, // @[SourceD.scala:50:14]
input [3:0] io_req_bits_sink, // @[SourceD.scala:50:14]
input [2:0] io_req_bits_way, // @[SourceD.scala:50:14]
input io_req_bits_bad, // @[SourceD.scala:50:14]
input io_d_ready, // @[SourceD.scala:50:14]
output io_d_valid, // @[SourceD.scala:50:14]
output [2:0] io_d_bits_opcode, // @[SourceD.scala:50:14]
output [1:0] io_d_bits_param, // @[SourceD.scala:50:14]
output [2:0] io_d_bits_size, // @[SourceD.scala:50:14]
output [5:0] io_d_bits_source, // @[SourceD.scala:50:14]
output [3:0] io_d_bits_sink, // @[SourceD.scala:50:14]
output io_d_bits_denied, // @[SourceD.scala:50:14]
output [127:0] io_d_bits_data, // @[SourceD.scala:50:14]
output io_d_bits_corrupt, // @[SourceD.scala:50:14]
input io_pb_pop_ready, // @[SourceD.scala:50:14]
output io_pb_pop_valid, // @[SourceD.scala:50:14]
output [5:0] io_pb_pop_bits_index, // @[SourceD.scala:50:14]
output io_pb_pop_bits_last, // @[SourceD.scala:50:14]
input [127:0] io_pb_beat_data, // @[SourceD.scala:50:14]
input [15:0] io_pb_beat_mask, // @[SourceD.scala:50:14]
input io_pb_beat_corrupt, // @[SourceD.scala:50:14]
input io_rel_pop_ready, // @[SourceD.scala:50:14]
output io_rel_pop_valid, // @[SourceD.scala:50:14]
output [5:0] io_rel_pop_bits_index, // @[SourceD.scala:50:14]
output io_rel_pop_bits_last, // @[SourceD.scala:50:14]
input [127:0] io_rel_beat_data, // @[SourceD.scala:50:14]
input io_rel_beat_corrupt, // @[SourceD.scala:50:14]
input io_bs_radr_ready, // @[SourceD.scala:50:14]
output io_bs_radr_valid, // @[SourceD.scala:50:14]
output [2:0] io_bs_radr_bits_way, // @[SourceD.scala:50:14]
output [9:0] io_bs_radr_bits_set, // @[SourceD.scala:50:14]
output [1:0] io_bs_radr_bits_beat, // @[SourceD.scala:50:14]
output [1:0] io_bs_radr_bits_mask, // @[SourceD.scala:50:14]
input [127:0] io_bs_rdat_data, // @[SourceD.scala:50:14]
input io_bs_wadr_ready, // @[SourceD.scala:50:14]
output io_bs_wadr_valid, // @[SourceD.scala:50:14]
output [2:0] io_bs_wadr_bits_way, // @[SourceD.scala:50:14]
output [9:0] io_bs_wadr_bits_set, // @[SourceD.scala:50:14]
output [1:0] io_bs_wadr_bits_beat, // @[SourceD.scala:50:14]
output [1:0] io_bs_wadr_bits_mask, // @[SourceD.scala:50:14]
output [127:0] io_bs_wdat_data, // @[SourceD.scala:50:14]
input [9:0] io_evict_req_set, // @[SourceD.scala:50:14]
input [2:0] io_evict_req_way, // @[SourceD.scala:50:14]
output io_evict_safe, // @[SourceD.scala:50:14]
input [9:0] io_grant_req_set, // @[SourceD.scala:50:14]
input [2:0] io_grant_req_way, // @[SourceD.scala:50:14]
output io_grant_safe // @[SourceD.scala:50:14]
);
wire [127:0] _atomics_io_data_out; // @[SourceD.scala:258:23]
wire _queue_io_enq_ready; // @[SourceD.scala:120:21]
wire _queue_io_deq_valid; // @[SourceD.scala:120:21]
wire [127:0] _queue_io_deq_bits_data; // @[SourceD.scala:120:21]
wire io_req_valid_0 = io_req_valid; // @[SourceD.scala:48:7]
wire io_req_bits_prio_0_0 = io_req_bits_prio_0; // @[SourceD.scala:48:7]
wire io_req_bits_prio_1_0 = io_req_bits_prio_1; // @[SourceD.scala:48:7]
wire io_req_bits_prio_2_0 = io_req_bits_prio_2; // @[SourceD.scala:48:7]
wire io_req_bits_control_0 = io_req_bits_control; // @[SourceD.scala:48:7]
wire [2:0] io_req_bits_opcode_0 = io_req_bits_opcode; // @[SourceD.scala:48:7]
wire [2:0] io_req_bits_param_0 = io_req_bits_param; // @[SourceD.scala:48:7]
wire [2:0] io_req_bits_size_0 = io_req_bits_size; // @[SourceD.scala:48:7]
wire [5:0] io_req_bits_source_0 = io_req_bits_source; // @[SourceD.scala:48:7]
wire [12:0] io_req_bits_tag_0 = io_req_bits_tag; // @[SourceD.scala:48:7]
wire [5:0] io_req_bits_offset_0 = io_req_bits_offset; // @[SourceD.scala:48:7]
wire [5:0] io_req_bits_put_0 = io_req_bits_put; // @[SourceD.scala:48:7]
wire [9:0] io_req_bits_set_0 = io_req_bits_set; // @[SourceD.scala:48:7]
wire [3:0] io_req_bits_sink_0 = io_req_bits_sink; // @[SourceD.scala:48:7]
wire [2:0] io_req_bits_way_0 = io_req_bits_way; // @[SourceD.scala:48:7]
wire io_req_bits_bad_0 = io_req_bits_bad; // @[SourceD.scala:48:7]
wire io_d_ready_0 = io_d_ready; // @[SourceD.scala:48:7]
wire io_pb_pop_ready_0 = io_pb_pop_ready; // @[SourceD.scala:48:7]
wire [127:0] io_pb_beat_data_0 = io_pb_beat_data; // @[SourceD.scala:48:7]
wire [15:0] io_pb_beat_mask_0 = io_pb_beat_mask; // @[SourceD.scala:48:7]
wire io_pb_beat_corrupt_0 = io_pb_beat_corrupt; // @[SourceD.scala:48:7]
wire io_rel_pop_ready_0 = io_rel_pop_ready; // @[SourceD.scala:48:7]
wire [127:0] io_rel_beat_data_0 = io_rel_beat_data; // @[SourceD.scala:48:7]
wire io_rel_beat_corrupt_0 = io_rel_beat_corrupt; // @[SourceD.scala:48:7]
wire io_bs_radr_ready_0 = io_bs_radr_ready; // @[SourceD.scala:48:7]
wire [127:0] io_bs_rdat_data_0 = io_bs_rdat_data; // @[SourceD.scala:48:7]
wire io_bs_wadr_ready_0 = io_bs_wadr_ready; // @[SourceD.scala:48:7]
wire [9:0] io_evict_req_set_0 = io_evict_req_set; // @[SourceD.scala:48:7]
wire [2:0] io_evict_req_way_0 = io_evict_req_way; // @[SourceD.scala:48:7]
wire [9:0] io_grant_req_set_0 = io_grant_req_set; // @[SourceD.scala:48:7]
wire [2:0] io_grant_req_way_0 = io_grant_req_way; // @[SourceD.scala:48:7]
wire io_bs_radr_bits_noop = 1'h0; // @[SourceD.scala:48:7]
wire io_bs_wadr_bits_noop = 1'h0; // @[SourceD.scala:48:7]
wire s1_mask_size = 1'h1; // @[Misc.scala:209:26]
wire pre_s3_4_bypass_size = 1'h1; // @[Misc.scala:209:26]
wire pre_s3_5_bypass_size = 1'h1; // @[Misc.scala:209:26]
wire pre_s3_6_bypass_size = 1'h1; // @[Misc.scala:209:26]
wire s1_2_bypass_size = 1'h1; // @[Misc.scala:209:26]
wire s1_3_bypass_size = 1'h1; // @[Misc.scala:209:26]
wire s1_4_bypass_size = 1'h1; // @[Misc.scala:209:26]
wire [3:0] s1_mask_sizeOH = 4'hF; // @[Misc.scala:202:81]
wire [3:0] pre_s3_4_bypass_sizeOH = 4'hF; // @[Misc.scala:202:81]
wire [3:0] pre_s3_5_bypass_sizeOH = 4'hF; // @[Misc.scala:202:81]
wire [3:0] pre_s3_6_bypass_sizeOH = 4'hF; // @[Misc.scala:202:81]
wire [3:0] s1_2_bypass_sizeOH = 4'hF; // @[Misc.scala:202:81]
wire [3:0] s1_3_bypass_sizeOH = 4'hF; // @[Misc.scala:202:81]
wire [3:0] s1_4_bypass_sizeOH = 4'hF; // @[Misc.scala:202:81]
wire [2:0] resp_opcode_0 = 3'h0; // @[SourceD.scala:215:28]
wire [2:0] resp_opcode_1 = 3'h0; // @[SourceD.scala:215:28]
wire [2:0] resp_opcode_7 = 3'h4; // @[SourceD.scala:215:28]
wire [2:0] resp_opcode_5 = 3'h2; // @[SourceD.scala:215:28]
wire [2:0] resp_opcode_2 = 3'h1; // @[SourceD.scala:215:28]
wire [2:0] resp_opcode_3 = 3'h1; // @[SourceD.scala:215:28]
wire [2:0] resp_opcode_4 = 3'h1; // @[SourceD.scala:215:28]
wire [15:0] _s2_pdata_raw_mask_T = 16'hFFFF; // @[SourceD.scala:161:64]
wire _io_req_ready_T; // @[SourceD.scala:140:19]
wire d_ready = io_d_ready_0; // @[SourceD.scala:48:7, :218:15]
wire d_valid; // @[SourceD.scala:218:15]
wire [2:0] d_bits_opcode; // @[SourceD.scala:218:15]
wire [1:0] d_bits_param; // @[SourceD.scala:218:15]
wire [2:0] d_bits_size; // @[SourceD.scala:218:15]
wire [5:0] d_bits_source; // @[SourceD.scala:218:15]
wire [3:0] d_bits_sink; // @[SourceD.scala:218:15]
wire d_bits_denied; // @[SourceD.scala:218:15]
wire [127:0] d_bits_data; // @[SourceD.scala:218:15]
wire d_bits_corrupt; // @[SourceD.scala:218:15]
wire _io_pb_pop_valid_T; // @[SourceD.scala:164:34]
wire _io_rel_pop_valid_T_1; // @[SourceD.scala:167:35]
wire s1_valid_r; // @[SourceD.scala:96:56]
wire [2:0] s1_req_way; // @[SourceD.scala:88:19]
wire [9:0] s1_req_set; // @[SourceD.scala:88:19]
wire [1:0] s1_beat; // @[SourceD.scala:102:56]
wire [1:0] s1_mask; // @[SourceD.scala:92:76]
wire _io_bs_wadr_valid_T; // @[SourceD.scala:270:31]
wire [1:0] _io_bs_wadr_bits_mask_T_30; // @[SourceD.scala:275:30]
wire _io_evict_safe_T_22; // @[SourceD.scala:378:90]
wire _io_grant_safe_T_22; // @[SourceD.scala:385:90]
wire io_req_ready_0; // @[SourceD.scala:48:7]
wire [2:0] io_d_bits_opcode_0; // @[SourceD.scala:48:7]
wire [1:0] io_d_bits_param_0; // @[SourceD.scala:48:7]
wire [2:0] io_d_bits_size_0; // @[SourceD.scala:48:7]
wire [5:0] io_d_bits_source_0; // @[SourceD.scala:48:7]
wire [3:0] io_d_bits_sink_0; // @[SourceD.scala:48:7]
wire io_d_bits_denied_0; // @[SourceD.scala:48:7]
wire [127:0] io_d_bits_data_0; // @[SourceD.scala:48:7]
wire io_d_bits_corrupt_0; // @[SourceD.scala:48:7]
wire io_d_valid_0; // @[SourceD.scala:48:7]
wire [5:0] io_pb_pop_bits_index_0; // @[SourceD.scala:48:7]
wire io_pb_pop_bits_last_0; // @[SourceD.scala:48:7]
wire io_pb_pop_valid_0; // @[SourceD.scala:48:7]
wire [5:0] io_rel_pop_bits_index_0; // @[SourceD.scala:48:7]
wire io_rel_pop_bits_last_0; // @[SourceD.scala:48:7]
wire io_rel_pop_valid_0; // @[SourceD.scala:48:7]
wire [2:0] io_bs_radr_bits_way_0; // @[SourceD.scala:48:7]
wire [9:0] io_bs_radr_bits_set_0; // @[SourceD.scala:48:7]
wire [1:0] io_bs_radr_bits_beat_0; // @[SourceD.scala:48:7]
wire [1:0] io_bs_radr_bits_mask_0; // @[SourceD.scala:48:7]
wire io_bs_radr_valid_0; // @[SourceD.scala:48:7]
wire [2:0] io_bs_wadr_bits_way_0; // @[SourceD.scala:48:7]
wire [9:0] io_bs_wadr_bits_set_0; // @[SourceD.scala:48:7]
wire [1:0] io_bs_wadr_bits_beat_0; // @[SourceD.scala:48:7]
wire [1:0] io_bs_wadr_bits_mask_0; // @[SourceD.scala:48:7]
wire io_bs_wadr_valid_0; // @[SourceD.scala:48:7]
wire [127:0] io_bs_wdat_data_0; // @[SourceD.scala:48:7]
wire io_evict_safe_0; // @[SourceD.scala:48:7]
wire io_grant_safe_0; // @[SourceD.scala:48:7]
wire _s1_valid_T_3; // @[SourceD.scala:141:38]
wire s1_valid; // @[SourceD.scala:74:22]
wire _s2_valid_T_2; // @[SourceD.scala:183:23]
wire s2_valid; // @[SourceD.scala:75:22]
wire _s3_valid_T_2; // @[SourceD.scala:241:23]
wire s3_valid; // @[SourceD.scala:76:22]
wire _s2_ready_T_4; // @[SourceD.scala:184:24]
wire s2_ready; // @[SourceD.scala:77:22]
wire _s3_ready_T_4; // @[SourceD.scala:242:24]
wire s3_ready; // @[SourceD.scala:78:22]
wire _s4_ready_T_5; // @[SourceD.scala:293:59]
wire s4_ready; // @[SourceD.scala:79:22]
reg busy; // @[SourceD.scala:84:21]
reg s1_block_r; // @[SourceD.scala:85:27]
reg [1:0] s1_counter; // @[SourceD.scala:86:27]
wire _s1_req_reg_T = ~busy; // @[SourceD.scala:84:21, :87:43]
wire _s1_req_reg_T_1 = _s1_req_reg_T & io_req_valid_0; // @[SourceD.scala:48:7, :87:{43,49}]
reg s1_req_reg_prio_0; // @[SourceD.scala:87:29]
reg s1_req_reg_prio_1; // @[SourceD.scala:87:29]
reg s1_req_reg_prio_2; // @[SourceD.scala:87:29]
reg s1_req_reg_control; // @[SourceD.scala:87:29]
reg [2:0] s1_req_reg_opcode; // @[SourceD.scala:87:29]
reg [2:0] s1_req_reg_param; // @[SourceD.scala:87:29]
reg [2:0] s1_req_reg_size; // @[SourceD.scala:87:29]
reg [5:0] s1_req_reg_source; // @[SourceD.scala:87:29]
reg [12:0] s1_req_reg_tag; // @[SourceD.scala:87:29]
reg [5:0] s1_req_reg_offset; // @[SourceD.scala:87:29]
reg [5:0] s1_req_reg_put; // @[SourceD.scala:87:29]
reg [9:0] s1_req_reg_set; // @[SourceD.scala:87:29]
reg [3:0] s1_req_reg_sink; // @[SourceD.scala:87:29]
reg [2:0] s1_req_reg_way; // @[SourceD.scala:87:29]
reg s1_req_reg_bad; // @[SourceD.scala:87:29]
wire _s1_req_T = ~busy; // @[SourceD.scala:84:21, :87:43, :88:20]
wire s1_req_prio_0 = _s1_req_T ? io_req_bits_prio_0_0 : s1_req_reg_prio_0; // @[SourceD.scala:48:7, :87:29, :88:{19,20}]
wire s1_req_prio_1 = _s1_req_T ? io_req_bits_prio_1_0 : s1_req_reg_prio_1; // @[SourceD.scala:48:7, :87:29, :88:{19,20}]
wire s1_req_prio_2 = _s1_req_T ? io_req_bits_prio_2_0 : s1_req_reg_prio_2; // @[SourceD.scala:48:7, :87:29, :88:{19,20}]
wire s1_req_control = _s1_req_T ? io_req_bits_control_0 : s1_req_reg_control; // @[SourceD.scala:48:7, :87:29, :88:{19,20}]
wire [2:0] s1_req_opcode = _s1_req_T ? io_req_bits_opcode_0 : s1_req_reg_opcode; // @[SourceD.scala:48:7, :87:29, :88:{19,20}]
wire [2:0] s1_req_param = _s1_req_T ? io_req_bits_param_0 : s1_req_reg_param; // @[SourceD.scala:48:7, :87:29, :88:{19,20}]
wire [2:0] s1_req_size = _s1_req_T ? io_req_bits_size_0 : s1_req_reg_size; // @[SourceD.scala:48:7, :87:29, :88:{19,20}]
wire [5:0] s1_req_source = _s1_req_T ? io_req_bits_source_0 : s1_req_reg_source; // @[SourceD.scala:48:7, :87:29, :88:{19,20}]
wire [12:0] s1_req_tag = _s1_req_T ? io_req_bits_tag_0 : s1_req_reg_tag; // @[SourceD.scala:48:7, :87:29, :88:{19,20}]
wire [5:0] s1_req_offset = _s1_req_T ? io_req_bits_offset_0 : s1_req_reg_offset; // @[SourceD.scala:48:7, :87:29, :88:{19,20}]
wire [5:0] s1_req_put = _s1_req_T ? io_req_bits_put_0 : s1_req_reg_put; // @[SourceD.scala:48:7, :87:29, :88:{19,20}]
assign s1_req_set = _s1_req_T ? io_req_bits_set_0 : s1_req_reg_set; // @[SourceD.scala:48:7, :87:29, :88:{19,20}]
wire [3:0] s1_req_sink = _s1_req_T ? io_req_bits_sink_0 : s1_req_reg_sink; // @[SourceD.scala:48:7, :87:29, :88:{19,20}]
assign s1_req_way = _s1_req_T ? io_req_bits_way_0 : s1_req_reg_way; // @[SourceD.scala:48:7, :87:29, :88:{19,20}]
wire s1_req_bad = _s1_req_T ? io_req_bits_bad_0 : s1_req_reg_bad; // @[SourceD.scala:48:7, :87:29, :88:{19,20}]
assign io_bs_radr_bits_set_0 = s1_req_set; // @[SourceD.scala:48:7, :88:19]
assign io_bs_radr_bits_way_0 = s1_req_way; // @[SourceD.scala:48:7, :88:19]
wire [1:0] _s1_x_bypass_T_1; // @[SourceD.scala:360:44]
wire [1:0] s1_x_bypass; // @[SourceD.scala:89:25]
wire _T_1 = busy | io_req_valid_0; // @[SourceD.scala:48:7, :84:21, :90:40]
wire _s1_latch_bypass_T; // @[SourceD.scala:90:40]
assign _s1_latch_bypass_T = _T_1; // @[SourceD.scala:90:40]
wire _s1_valid_r_T; // @[SourceD.scala:96:26]
assign _s1_valid_r_T = _T_1; // @[SourceD.scala:90:40, :96:26]
wire _s1_valid_T; // @[SourceD.scala:141:21]
assign _s1_valid_T = _T_1; // @[SourceD.scala:90:40, :141:21]
wire _s1_latch_bypass_T_1 = ~_s1_latch_bypass_T; // @[SourceD.scala:90:{33,40}]
wire _s1_latch_bypass_T_2 = _s1_latch_bypass_T_1 | s2_ready; // @[SourceD.scala:77:22, :90:{33,57}]
reg s1_latch_bypass; // @[SourceD.scala:90:32]
reg [1:0] s1_bypass_r; // @[SourceD.scala:91:62]
wire [1:0] s1_bypass = s1_latch_bypass ? s1_x_bypass : s1_bypass_r; // @[SourceD.scala:89:25, :90:32, :91:{22,62}]
wire [3:0] _s1_mask_sizeOH_T = {1'h0, s1_req_size}; // @[Misc.scala:202:34]
wire [1:0] s1_mask_sizeOH_shiftAmount = _s1_mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _s1_mask_sizeOH_T_1 = 4'h1 << s1_mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [3:0] _s1_mask_sizeOH_T_2 = _s1_mask_sizeOH_T_1; // @[OneHot.scala:65:{12,27}]
wire s1_mask_sub_0_1 = s1_req_size[2]; // @[Misc.scala:206:21]
wire s1_mask_bit = s1_req_offset[3]; // @[Misc.scala:210:26]
wire s1_mask_eq_1 = s1_mask_bit; // @[Misc.scala:210:26, :214:27]
wire s1_mask_nbit = ~s1_mask_bit; // @[Misc.scala:210:26, :211:20]
wire s1_mask_eq = s1_mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _s1_mask_acc_T = s1_mask_eq; // @[Misc.scala:214:27, :215:38]
wire s1_mask_acc = s1_mask_sub_0_1 | _s1_mask_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _s1_mask_acc_T_1 = s1_mask_eq_1; // @[Misc.scala:214:27, :215:38]
wire s1_mask_acc_1 = s1_mask_sub_0_1 | _s1_mask_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire [1:0] _s1_mask_T = {s1_mask_acc_1, s1_mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] _s1_mask_T_1 = ~s1_bypass; // @[SourceD.scala:91:22, :92:78]
assign s1_mask = _s1_mask_T & _s1_mask_T_1; // @[Misc.scala:222:10]
assign io_bs_radr_bits_mask_0 = s1_mask; // @[SourceD.scala:48:7, :92:76]
wire _GEN = s1_req_opcode == 3'h6; // @[SourceD.scala:88:19, :93:33]
wire _s1_grant_T; // @[SourceD.scala:93:33]
assign _s1_grant_T = _GEN; // @[SourceD.scala:93:33]
wire _s1_single_T_2; // @[SourceD.scala:98:89]
assign _s1_single_T_2 = _GEN; // @[SourceD.scala:93:33, :98:89]
wire _s1_grant_T_1 = s1_req_param == 3'h2; // @[SourceD.scala:88:19, :93:66]
wire _s1_grant_T_2 = _s1_grant_T & _s1_grant_T_1; // @[SourceD.scala:93:{33,50,66}]
wire _s1_grant_T_3 = &s1_req_opcode; // @[SourceD.scala:88:19, :93:93]
wire s1_grant = _s1_grant_T_2 | _s1_grant_T_3; // @[SourceD.scala:93:{50,76,93}]
wire _s1_need_r_T = |s1_mask; // @[SourceD.scala:92:76, :94:27]
wire _s1_need_r_T_1 = _s1_need_r_T & s1_req_prio_0; // @[SourceD.scala:88:19, :94:{27,31}]
wire _s1_need_r_T_2 = s1_req_opcode != 3'h5; // @[SourceD.scala:88:19, :94:66]
wire _s1_need_r_T_3 = _s1_need_r_T_1 & _s1_need_r_T_2; // @[SourceD.scala:94:{31,49,66}]
wire _s1_need_r_T_4 = ~s1_grant; // @[SourceD.scala:93:76, :94:78]
wire _s1_need_r_T_5 = _s1_need_r_T_3 & _s1_need_r_T_4; // @[SourceD.scala:94:{49,75,78}]
wire _s1_need_r_T_6 = |s1_req_opcode; // @[SourceD.scala:88:19, :95:34]
wire _s1_need_r_T_7 = s1_req_size < 3'h3; // @[SourceD.scala:88:19, :95:65]
wire _s1_need_r_T_8 = _s1_need_r_T_6 | _s1_need_r_T_7; // @[SourceD.scala:95:{34,50,65}]
wire s1_need_r = _s1_need_r_T_5 & _s1_need_r_T_8; // @[SourceD.scala:94:{75,88}, :95:50]
wire _s1_valid_r_T_1 = _s1_valid_r_T & s1_need_r; // @[SourceD.scala:94:88, :96:{26,43}]
wire _s1_valid_r_T_2 = ~s1_block_r; // @[SourceD.scala:85:27, :96:59]
assign s1_valid_r = _s1_valid_r_T_1 & _s1_valid_r_T_2; // @[SourceD.scala:96:{43,56,59}]
assign io_bs_radr_valid_0 = s1_valid_r; // @[SourceD.scala:48:7, :96:56]
wire _s1_need_pb_T = s1_req_opcode[2]; // @[SourceD.scala:88:19, :97:54]
wire _s1_need_pb_T_1 = ~_s1_need_pb_T; // @[SourceD.scala:97:{40,54}]
wire _s1_need_pb_T_2 = s1_req_opcode[0]; // @[SourceD.scala:88:19, :97:72]
wire s1_need_pb = s1_req_prio_0 ? _s1_need_pb_T_1 : _s1_need_pb_T_2; // @[SourceD.scala:88:19, :97:{23,40,72}]
wire _s1_single_T = s1_req_opcode == 3'h5; // @[SourceD.scala:88:19, :98:53]
wire _s1_single_T_1 = _s1_single_T | s1_grant; // @[SourceD.scala:93:76, :98:{53,62}]
wire s1_single = s1_req_prio_0 ? _s1_single_T_1 : _s1_single_T_2; // @[SourceD.scala:88:19, :98:{22,62,89}]
wire s1_retires = ~s1_single; // @[SourceD.scala:98:22, :99:20]
wire [12:0] _s1_beats1_T = 13'h3F << s1_req_size; // @[package.scala:243:71]
wire [5:0] _s1_beats1_T_1 = _s1_beats1_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _s1_beats1_T_2 = ~_s1_beats1_T_1; // @[package.scala:243:{46,76}]
wire [1:0] _s1_beats1_T_3 = _s1_beats1_T_2[5:4]; // @[package.scala:243:46]
wire [1:0] s1_beats1 = s1_single ? 2'h0 : _s1_beats1_T_3; // @[SourceD.scala:98:22, :101:{22,95}]
wire [1:0] _s1_beat_T = s1_req_offset[5:4]; // @[SourceD.scala:88:19, :102:32]
assign s1_beat = _s1_beat_T | s1_counter; // @[SourceD.scala:86:27, :102:{32,56}]
assign io_bs_radr_bits_beat_0 = s1_beat; // @[SourceD.scala:48:7, :102:56]
wire s1_last = s1_counter == s1_beats1; // @[SourceD.scala:86:27, :101:22, :103:28]
wire s1_first = s1_counter == 2'h0; // @[SourceD.scala:86:27, :104:29]
wire _queue_io_enq_valid_T = io_bs_radr_ready_0 & io_bs_radr_valid_0; // @[Decoupled.scala:51:35]
reg queue_io_enq_valid_REG; // @[SourceD.scala:121:40]
reg queue_io_enq_valid_REG_1; // @[SourceD.scala:121:32]
wire s2_latch = s1_valid & s2_ready; // @[SourceD.scala:74:22, :77:22, :129:18, :146:27]
wire [2:0] _s1_counter_T = {1'h0, s1_counter} + 3'h1; // @[SourceD.scala:86:27, :130:30]
wire [1:0] _s1_counter_T_1 = _s1_counter_T[1:0]; // @[SourceD.scala:130:30]
assign _io_req_ready_T = ~busy; // @[SourceD.scala:84:21, :87:43, :140:19]
assign io_req_ready_0 = _io_req_ready_T; // @[SourceD.scala:48:7, :140:19]
wire _s1_valid_T_1 = ~s1_valid_r; // @[SourceD.scala:96:56, :141:42]
wire _s1_valid_T_2 = _s1_valid_T_1 | io_bs_radr_ready_0; // @[SourceD.scala:48:7, :141:{42,54}]
assign _s1_valid_T_3 = _s1_valid_T & _s1_valid_T_2; // @[SourceD.scala:141:{21,38,54}]
assign s1_valid = _s1_valid_T_3; // @[SourceD.scala:74:22, :141:38]
reg s2_full; // @[SourceD.scala:147:24]
reg s2_valid_pb; // @[SourceD.scala:148:28]
reg [1:0] s2_beat; // @[SourceD.scala:149:26]
reg [1:0] s2_bypass; // @[SourceD.scala:150:28]
reg s2_req_prio_0; // @[SourceD.scala:151:25]
reg s2_req_prio_1; // @[SourceD.scala:151:25]
reg s2_req_prio_2; // @[SourceD.scala:151:25]
reg s2_req_control; // @[SourceD.scala:151:25]
reg [2:0] s2_req_opcode; // @[SourceD.scala:151:25]
reg [2:0] s2_req_param; // @[SourceD.scala:151:25]
reg [2:0] s2_req_size; // @[SourceD.scala:151:25]
reg [5:0] s2_req_source; // @[SourceD.scala:151:25]
reg [12:0] s2_req_tag; // @[SourceD.scala:151:25]
reg [5:0] s2_req_offset; // @[SourceD.scala:151:25]
reg [5:0] s2_req_put; // @[SourceD.scala:151:25]
assign io_pb_pop_bits_index_0 = s2_req_put; // @[SourceD.scala:48:7, :151:25]
assign io_rel_pop_bits_index_0 = s2_req_put; // @[SourceD.scala:48:7, :151:25]
reg [9:0] s2_req_set; // @[SourceD.scala:151:25]
reg [3:0] s2_req_sink; // @[SourceD.scala:151:25]
reg [2:0] s2_req_way; // @[SourceD.scala:151:25]
reg s2_req_bad; // @[SourceD.scala:151:25]
reg s2_last; // @[SourceD.scala:152:26]
assign io_pb_pop_bits_last_0 = s2_last; // @[SourceD.scala:48:7, :152:26]
assign io_rel_pop_bits_last_0 = s2_last; // @[SourceD.scala:48:7, :152:26]
reg s2_need_r; // @[SourceD.scala:153:28]
reg s2_need_pb; // @[SourceD.scala:154:29]
reg s2_retires; // @[SourceD.scala:155:29]
wire _s2_need_d_T = ~s1_need_pb; // @[SourceD.scala:97:23, :156:29]
wire _s2_need_d_T_1 = _s2_need_d_T | s1_first; // @[SourceD.scala:104:29, :156:{29,41}]
reg s2_need_d; // @[SourceD.scala:156:28]
wire [127:0] _s2_pdata_raw_data_T; // @[SourceD.scala:160:30]
wire [15:0] _s2_pdata_raw_mask_T_1; // @[SourceD.scala:161:30]
wire _s2_pdata_raw_corrupt_T; // @[SourceD.scala:162:30]
wire [127:0] s2_pdata_raw_data; // @[SourceD.scala:157:26]
wire [15:0] s2_pdata_raw_mask; // @[SourceD.scala:157:26]
wire s2_pdata_raw_corrupt; // @[SourceD.scala:157:26]
reg [127:0] s2_pdata_r_data; // @[package.scala:88:63]
reg [15:0] s2_pdata_r_mask; // @[package.scala:88:63]
reg s2_pdata_r_corrupt; // @[package.scala:88:63]
wire [127:0] s2_pdata_data = s2_valid_pb ? s2_pdata_raw_data : s2_pdata_r_data; // @[package.scala:88:{42,63}]
wire [15:0] s2_pdata_mask = s2_valid_pb ? s2_pdata_raw_mask : s2_pdata_r_mask; // @[package.scala:88:{42,63}]
wire s2_pdata_corrupt = s2_valid_pb ? s2_pdata_raw_corrupt : s2_pdata_r_corrupt; // @[package.scala:88:{42,63}]
assign _s2_pdata_raw_data_T = s2_req_prio_0 ? io_pb_beat_data_0 : io_rel_beat_data_0; // @[SourceD.scala:48:7, :151:25, :160:30]
assign s2_pdata_raw_data = _s2_pdata_raw_data_T; // @[SourceD.scala:157:26, :160:30]
assign _s2_pdata_raw_mask_T_1 = s2_req_prio_0 ? io_pb_beat_mask_0 : 16'hFFFF; // @[SourceD.scala:48:7, :151:25, :161:30]
assign s2_pdata_raw_mask = _s2_pdata_raw_mask_T_1; // @[SourceD.scala:157:26, :161:30]
assign _s2_pdata_raw_corrupt_T = s2_req_prio_0 ? io_pb_beat_corrupt_0 : io_rel_beat_corrupt_0; // @[SourceD.scala:48:7, :151:25, :162:30]
assign s2_pdata_raw_corrupt = _s2_pdata_raw_corrupt_T; // @[SourceD.scala:157:26, :162:30]
assign _io_pb_pop_valid_T = s2_valid_pb & s2_req_prio_0; // @[SourceD.scala:148:28, :151:25, :164:34]
assign io_pb_pop_valid_0 = _io_pb_pop_valid_T; // @[SourceD.scala:48:7, :164:34]
wire _io_rel_pop_valid_T = ~s2_req_prio_0; // @[SourceD.scala:151:25, :167:38]
assign _io_rel_pop_valid_T_1 = s2_valid_pb & _io_rel_pop_valid_T; // @[SourceD.scala:148:28, :167:{35,38}]
assign io_rel_pop_valid_0 = _io_rel_pop_valid_T_1; // @[SourceD.scala:48:7, :167:35]
wire pb_ready = s2_req_prio_0 ? io_pb_pop_ready_0 : io_rel_pop_ready_0; // @[SourceD.scala:48:7, :151:25, :175:21]
wire s3_latch = s2_valid & s3_ready; // @[SourceD.scala:75:22, :78:22, :177:18, :189:27]
wire _s2_valid_T = ~s2_valid_pb; // @[SourceD.scala:148:28, :183:27]
wire _s2_valid_T_1 = _s2_valid_T | pb_ready; // @[SourceD.scala:175:21, :183:{27,40}]
assign _s2_valid_T_2 = s2_full & _s2_valid_T_1; // @[SourceD.scala:147:24, :183:{23,40}]
assign s2_valid = _s2_valid_T_2; // @[SourceD.scala:75:22, :183:23]
wire _s2_ready_T = ~s2_full; // @[SourceD.scala:147:24, :184:15]
wire _s2_ready_T_1 = ~s2_valid_pb; // @[SourceD.scala:148:28, :183:27, :184:41]
wire _s2_ready_T_2 = _s2_ready_T_1 | pb_ready; // @[SourceD.scala:175:21, :184:{41,54}]
wire _s2_ready_T_3 = s3_ready & _s2_ready_T_2; // @[SourceD.scala:78:22, :184:{37,54}]
assign _s2_ready_T_4 = _s2_ready_T | _s2_ready_T_3; // @[SourceD.scala:184:{15,24,37}]
assign s2_ready = _s2_ready_T_4; // @[SourceD.scala:77:22, :184:24]
reg s3_full; // @[SourceD.scala:190:24]
reg s3_valid_d; // @[SourceD.scala:191:27]
assign d_valid = s3_valid_d; // @[SourceD.scala:191:27, :218:15]
reg [1:0] s3_beat; // @[SourceD.scala:192:26]
wire [1:0] pre_s3_beat = s3_latch ? s2_beat : s3_beat; // @[SourceD.scala:149:26, :189:27, :192:26, :319:24]
reg [1:0] s3_bypass; // @[SourceD.scala:193:28]
reg s3_req_prio_0; // @[SourceD.scala:194:25]
reg s3_req_prio_1; // @[SourceD.scala:194:25]
reg s3_req_prio_2; // @[SourceD.scala:194:25]
reg s3_req_control; // @[SourceD.scala:194:25]
reg [2:0] s3_req_opcode; // @[SourceD.scala:194:25]
reg [2:0] s3_req_param; // @[SourceD.scala:194:25]
reg [2:0] s3_req_size; // @[SourceD.scala:194:25]
assign d_bits_size = s3_req_size; // @[SourceD.scala:194:25, :218:15]
reg [5:0] s3_req_source; // @[SourceD.scala:194:25]
assign d_bits_source = s3_req_source; // @[SourceD.scala:194:25, :218:15]
reg [12:0] s3_req_tag; // @[SourceD.scala:194:25]
reg [5:0] s3_req_offset; // @[SourceD.scala:194:25]
reg [5:0] s3_req_put; // @[SourceD.scala:194:25]
reg [9:0] s3_req_set; // @[SourceD.scala:194:25]
reg [3:0] s3_req_sink; // @[SourceD.scala:194:25]
assign d_bits_sink = s3_req_sink; // @[SourceD.scala:194:25, :218:15]
reg [2:0] s3_req_way; // @[SourceD.scala:194:25]
reg s3_req_bad; // @[SourceD.scala:194:25]
assign d_bits_denied = s3_req_bad; // @[SourceD.scala:194:25, :218:15]
wire pre_s3_req_prio_0 = s3_latch ? s2_req_prio_0 : s3_req_prio_0; // @[SourceD.scala:151:25, :189:27, :194:25, :315:24]
wire pre_s3_req_prio_1 = s3_latch ? s2_req_prio_1 : s3_req_prio_1; // @[SourceD.scala:151:25, :189:27, :194:25, :315:24]
wire pre_s3_req_prio_2 = s3_latch ? s2_req_prio_2 : s3_req_prio_2; // @[SourceD.scala:151:25, :189:27, :194:25, :315:24]
wire pre_s3_req_control = s3_latch ? s2_req_control : s3_req_control; // @[SourceD.scala:151:25, :189:27, :194:25, :315:24]
wire [2:0] pre_s3_req_opcode = s3_latch ? s2_req_opcode : s3_req_opcode; // @[SourceD.scala:151:25, :189:27, :194:25, :315:24]
wire [2:0] pre_s3_req_param = s3_latch ? s2_req_param : s3_req_param; // @[SourceD.scala:151:25, :189:27, :194:25, :315:24]
wire [2:0] pre_s3_req_size = s3_latch ? s2_req_size : s3_req_size; // @[SourceD.scala:151:25, :189:27, :194:25, :315:24]
wire [5:0] pre_s3_req_source = s3_latch ? s2_req_source : s3_req_source; // @[SourceD.scala:151:25, :189:27, :194:25, :315:24]
wire [12:0] pre_s3_req_tag = s3_latch ? s2_req_tag : s3_req_tag; // @[SourceD.scala:151:25, :189:27, :194:25, :315:24]
wire [5:0] pre_s3_req_offset = s3_latch ? s2_req_offset : s3_req_offset; // @[SourceD.scala:151:25, :189:27, :194:25, :315:24]
wire [5:0] pre_s3_req_put = s3_latch ? s2_req_put : s3_req_put; // @[SourceD.scala:151:25, :189:27, :194:25, :315:24]
wire [9:0] pre_s3_req_set = s3_latch ? s2_req_set : s3_req_set; // @[SourceD.scala:151:25, :189:27, :194:25, :315:24]
wire [3:0] pre_s3_req_sink = s3_latch ? s2_req_sink : s3_req_sink; // @[SourceD.scala:151:25, :189:27, :194:25, :315:24]
wire [2:0] pre_s3_req_way = s3_latch ? s2_req_way : s3_req_way; // @[SourceD.scala:151:25, :189:27, :194:25, :315:24]
wire pre_s3_req_bad = s3_latch ? s2_req_bad : s3_req_bad; // @[SourceD.scala:151:25, :189:27, :194:25, :315:24]
wire [2:0] s3_adjusted_opcode = s3_req_bad ? 3'h4 : s3_req_opcode; // @[SourceD.scala:194:25, :195:31]
reg s3_last; // @[SourceD.scala:196:26]
reg [127:0] s3_pdata_data; // @[SourceD.scala:197:27]
reg [15:0] s3_pdata_mask; // @[SourceD.scala:197:27]
reg s3_pdata_corrupt; // @[SourceD.scala:197:27]
reg s3_need_pb; // @[SourceD.scala:198:29]
reg s3_retires; // @[SourceD.scala:199:29]
reg s3_need_r; // @[SourceD.scala:200:28]
wire _s3_acq_T = s3_req_opcode == 3'h6; // @[SourceD.scala:194:25, :202:30]
wire _s3_acq_T_1 = &s3_req_opcode; // @[SourceD.scala:194:25, :202:64]
wire s3_acq = _s3_acq_T | _s3_acq_T_1; // @[SourceD.scala:202:{30,47,64}]
wire [127:0] _s3_bypass_data_T_26; // @[package.scala:45:27]
wire [127:0] s3_bypass_data; // @[SourceD.scala:206:28]
wire _s3_rdata_T = s3_bypass[0]; // @[SourceD.scala:193:28, :208:78]
wire _s3_rdata_T_1 = s3_bypass[1]; // @[SourceD.scala:193:28, :208:78]
wire [63:0] _s3_rdata_T_2 = s3_bypass_data[63:0]; // @[SourceD.scala:206:28, :207:78]
wire [63:0] _s3_rdata_T_3 = s3_bypass_data[127:64]; // @[SourceD.scala:206:28, :207:78]
wire [63:0] _s3_rdata_T_4 = _queue_io_deq_bits_data[63:0]; // @[SourceD.scala:120:21, :207:78]
wire [63:0] _s3_rdata_T_5 = _queue_io_deq_bits_data[127:64]; // @[SourceD.scala:120:21, :207:78]
wire [63:0] _s3_rdata_T_6 = _s3_rdata_T ? _s3_rdata_T_2 : _s3_rdata_T_4; // @[SourceD.scala:207:78, :208:78, :210:75]
wire [63:0] _s3_rdata_T_7 = _s3_rdata_T_1 ? _s3_rdata_T_3 : _s3_rdata_T_5; // @[SourceD.scala:207:78, :208:78, :210:75]
wire [127:0] s3_rdata = {_s3_rdata_T_7, _s3_rdata_T_6}; // @[package.scala:45:27]
assign d_bits_data = s3_rdata; // @[package.scala:45:27]
wire _grant_T = s3_req_param == 3'h2; // @[SourceD.scala:194:25, :214:32]
wire [2:0] grant = {2'h2, ~_grant_T}; // @[SourceD.scala:214:{18,32}]
wire [2:0] resp_opcode_6 = grant; // @[SourceD.scala:214:18, :215:28]
assign io_d_valid_0 = d_valid; // @[SourceD.scala:48:7, :218:15]
wire [2:0] _d_bits_opcode_T; // @[SourceD.scala:222:24]
assign io_d_bits_opcode_0 = d_bits_opcode; // @[SourceD.scala:48:7, :218:15]
wire [1:0] _d_bits_param_T_3; // @[SourceD.scala:223:24]
assign io_d_bits_param_0 = d_bits_param; // @[SourceD.scala:48:7, :218:15]
assign io_d_bits_size_0 = d_bits_size; // @[SourceD.scala:48:7, :218:15]
assign io_d_bits_source_0 = d_bits_source; // @[SourceD.scala:48:7, :218:15]
assign io_d_bits_sink_0 = d_bits_sink; // @[SourceD.scala:48:7, :218:15]
assign io_d_bits_denied_0 = d_bits_denied; // @[SourceD.scala:48:7, :218:15]
assign io_d_bits_data_0 = d_bits_data; // @[SourceD.scala:48:7, :218:15]
wire _d_bits_corrupt_T_1; // @[SourceD.scala:229:32]
assign io_d_bits_corrupt_0 = d_bits_corrupt; // @[SourceD.scala:48:7, :218:15]
wire [7:0][2:0] _GEN_0 = {{3'h4}, {resp_opcode_6}, {3'h2}, {3'h1}, {3'h1}, {3'h1}, {3'h0}, {3'h0}}; // @[SourceD.scala:215:28, :222:24]
assign _d_bits_opcode_T = s3_req_prio_0 ? _GEN_0[s3_req_opcode] : 3'h6; // @[SourceD.scala:194:25, :222:24]
assign d_bits_opcode = _d_bits_opcode_T; // @[SourceD.scala:218:15, :222:24]
wire _d_bits_param_T = s3_req_prio_0 & s3_acq; // @[SourceD.scala:194:25, :202:47, :223:40]
wire _d_bits_param_T_1 = |s3_req_param; // @[SourceD.scala:194:25, :223:68]
wire [1:0] _d_bits_param_T_2 = {1'h0, ~_d_bits_param_T_1}; // @[SourceD.scala:223:{54,68}]
assign _d_bits_param_T_3 = _d_bits_param_T ? _d_bits_param_T_2 : 2'h0; // @[SourceD.scala:223:{24,40,54}]
assign d_bits_param = _d_bits_param_T_3; // @[SourceD.scala:218:15, :223:24]
wire _d_bits_corrupt_T = d_bits_opcode[0]; // @[SourceD.scala:218:15, :229:48]
assign _d_bits_corrupt_T_1 = s3_req_bad & _d_bits_corrupt_T; // @[SourceD.scala:194:25, :229:{32,48}]
assign d_bits_corrupt = _d_bits_corrupt_T_1; // @[SourceD.scala:218:15, :229:32]
wire _queue_io_deq_ready_T = s3_valid & s4_ready; // @[SourceD.scala:76:22, :79:22, :231:34]
wire _queue_io_deq_ready_T_1 = _queue_io_deq_ready_T & s3_need_r; // @[SourceD.scala:200:28, :231:{34,46}]
wire _s3_valid_T = ~s3_valid_d; // @[SourceD.scala:191:27, :241:27]
wire _s3_valid_T_1 = _s3_valid_T | d_ready; // @[SourceD.scala:218:15, :241:{27,39}]
assign _s3_valid_T_2 = s3_full & _s3_valid_T_1; // @[SourceD.scala:190:24, :241:{23,39}]
assign s3_valid = _s3_valid_T_2; // @[SourceD.scala:76:22, :241:23]
wire _s3_ready_T = ~s3_full; // @[SourceD.scala:190:24, :232:11, :242:15]
wire _s3_ready_T_1 = ~s3_valid_d; // @[SourceD.scala:191:27, :241:27, :242:41]
wire _s3_ready_T_2 = _s3_ready_T_1 | d_ready; // @[SourceD.scala:218:15, :242:{41,53}]
wire _s3_ready_T_3 = s4_ready & _s3_ready_T_2; // @[SourceD.scala:79:22, :242:{37,53}]
assign _s3_ready_T_4 = _s3_ready_T | _s3_ready_T_3; // @[SourceD.scala:242:{15,24,37}]
assign s3_ready = _s3_ready_T_4; // @[SourceD.scala:78:22, :242:24]
wire _s4_latch_T = s3_valid & s3_retires; // @[SourceD.scala:76:22, :199:29, :247:27]
wire s4_latch = _s4_latch_T & s4_ready; // @[SourceD.scala:79:22, :247:{27,41}]
reg s4_full; // @[SourceD.scala:248:24]
reg [1:0] s4_beat; // @[SourceD.scala:249:26]
assign io_bs_wadr_bits_beat_0 = s4_beat; // @[SourceD.scala:48:7, :249:26]
wire [1:0] pre_s4_beat = s4_latch ? s3_beat : s4_beat; // @[SourceD.scala:192:26, :247:41, :249:26, :320:24]
reg s4_need_r; // @[SourceD.scala:250:28]
reg s4_need_bs; // @[SourceD.scala:251:29]
reg s4_need_pb; // @[SourceD.scala:252:29]
reg s4_req_prio_0; // @[SourceD.scala:253:25]
reg s4_req_prio_1; // @[SourceD.scala:253:25]
reg s4_req_prio_2; // @[SourceD.scala:253:25]
reg s4_req_control; // @[SourceD.scala:253:25]
reg [2:0] s4_req_opcode; // @[SourceD.scala:253:25]
reg [2:0] s4_req_param; // @[SourceD.scala:253:25]
reg [2:0] s4_req_size; // @[SourceD.scala:253:25]
reg [5:0] s4_req_source; // @[SourceD.scala:253:25]
reg [12:0] s4_req_tag; // @[SourceD.scala:253:25]
reg [5:0] s4_req_offset; // @[SourceD.scala:253:25]
reg [5:0] s4_req_put; // @[SourceD.scala:253:25]
reg [9:0] s4_req_set; // @[SourceD.scala:253:25]
assign io_bs_wadr_bits_set_0 = s4_req_set; // @[SourceD.scala:48:7, :253:25]
reg [3:0] s4_req_sink; // @[SourceD.scala:253:25]
reg [2:0] s4_req_way; // @[SourceD.scala:253:25]
assign io_bs_wadr_bits_way_0 = s4_req_way; // @[SourceD.scala:48:7, :253:25]
reg s4_req_bad; // @[SourceD.scala:253:25]
wire pre_s4_req_prio_0 = s4_latch ? s3_req_prio_0 : s4_req_prio_0; // @[SourceD.scala:194:25, :247:41, :253:25, :316:24]
wire pre_s4_req_prio_1 = s4_latch ? s3_req_prio_1 : s4_req_prio_1; // @[SourceD.scala:194:25, :247:41, :253:25, :316:24]
wire pre_s4_req_prio_2 = s4_latch ? s3_req_prio_2 : s4_req_prio_2; // @[SourceD.scala:194:25, :247:41, :253:25, :316:24]
wire pre_s4_req_control = s4_latch ? s3_req_control : s4_req_control; // @[SourceD.scala:194:25, :247:41, :253:25, :316:24]
wire [2:0] pre_s4_req_opcode = s4_latch ? s3_req_opcode : s4_req_opcode; // @[SourceD.scala:194:25, :247:41, :253:25, :316:24]
wire [2:0] pre_s4_req_param = s4_latch ? s3_req_param : s4_req_param; // @[SourceD.scala:194:25, :247:41, :253:25, :316:24]
wire [2:0] pre_s4_req_size = s4_latch ? s3_req_size : s4_req_size; // @[SourceD.scala:194:25, :247:41, :253:25, :316:24]
wire [5:0] pre_s4_req_source = s4_latch ? s3_req_source : s4_req_source; // @[SourceD.scala:194:25, :247:41, :253:25, :316:24]
wire [12:0] pre_s4_req_tag = s4_latch ? s3_req_tag : s4_req_tag; // @[SourceD.scala:194:25, :247:41, :253:25, :316:24]
wire [5:0] pre_s4_req_offset = s4_latch ? s3_req_offset : s4_req_offset; // @[SourceD.scala:194:25, :247:41, :253:25, :316:24]
wire [5:0] pre_s4_req_put = s4_latch ? s3_req_put : s4_req_put; // @[SourceD.scala:194:25, :247:41, :253:25, :316:24]
wire [9:0] pre_s4_req_set = s4_latch ? s3_req_set : s4_req_set; // @[SourceD.scala:194:25, :247:41, :253:25, :316:24]
wire [3:0] pre_s4_req_sink = s4_latch ? s3_req_sink : s4_req_sink; // @[SourceD.scala:194:25, :247:41, :253:25, :316:24]
wire [2:0] pre_s4_req_way = s4_latch ? s3_req_way : s4_req_way; // @[SourceD.scala:194:25, :247:41, :253:25, :316:24]
wire pre_s4_req_bad = s4_latch ? s3_req_bad : s4_req_bad; // @[SourceD.scala:194:25, :247:41, :253:25, :316:24]
reg [2:0] s4_adjusted_opcode; // @[SourceD.scala:254:37]
reg [127:0] s4_pdata_data; // @[SourceD.scala:255:27]
reg [15:0] s4_pdata_mask; // @[SourceD.scala:255:27]
reg s4_pdata_corrupt; // @[SourceD.scala:255:27]
reg [127:0] s4_rdata; // @[SourceD.scala:256:27]
assign _io_bs_wadr_valid_T = s4_full & s4_need_bs; // @[SourceD.scala:248:24, :251:29, :270:31]
assign io_bs_wadr_valid_0 = _io_bs_wadr_valid_T; // @[SourceD.scala:48:7, :270:31]
wire _io_bs_wadr_bits_mask_T = s4_pdata_mask[0]; // @[SourceD.scala:255:27, :275:45]
wire _io_bs_wadr_bits_mask_T_1 = s4_pdata_mask[1]; // @[SourceD.scala:255:27, :275:45]
wire _io_bs_wadr_bits_mask_T_2 = s4_pdata_mask[2]; // @[SourceD.scala:255:27, :275:45]
wire _io_bs_wadr_bits_mask_T_3 = s4_pdata_mask[3]; // @[SourceD.scala:255:27, :275:45]
wire _io_bs_wadr_bits_mask_T_4 = s4_pdata_mask[4]; // @[SourceD.scala:255:27, :275:45]
wire _io_bs_wadr_bits_mask_T_5 = s4_pdata_mask[5]; // @[SourceD.scala:255:27, :275:45]
wire _io_bs_wadr_bits_mask_T_6 = s4_pdata_mask[6]; // @[SourceD.scala:255:27, :275:45]
wire _io_bs_wadr_bits_mask_T_7 = s4_pdata_mask[7]; // @[SourceD.scala:255:27, :275:45]
wire _io_bs_wadr_bits_mask_T_8 = s4_pdata_mask[8]; // @[SourceD.scala:255:27, :275:45]
wire _io_bs_wadr_bits_mask_T_9 = s4_pdata_mask[9]; // @[SourceD.scala:255:27, :275:45]
wire _io_bs_wadr_bits_mask_T_10 = s4_pdata_mask[10]; // @[SourceD.scala:255:27, :275:45]
wire _io_bs_wadr_bits_mask_T_11 = s4_pdata_mask[11]; // @[SourceD.scala:255:27, :275:45]
wire _io_bs_wadr_bits_mask_T_12 = s4_pdata_mask[12]; // @[SourceD.scala:255:27, :275:45]
wire _io_bs_wadr_bits_mask_T_13 = s4_pdata_mask[13]; // @[SourceD.scala:255:27, :275:45]
wire _io_bs_wadr_bits_mask_T_14 = s4_pdata_mask[14]; // @[SourceD.scala:255:27, :275:45]
wire _io_bs_wadr_bits_mask_T_15 = s4_pdata_mask[15]; // @[SourceD.scala:255:27, :275:45]
wire _io_bs_wadr_bits_mask_T_16 = _io_bs_wadr_bits_mask_T | _io_bs_wadr_bits_mask_T_1; // @[SourceD.scala:275:{45,87}]
wire _io_bs_wadr_bits_mask_T_17 = _io_bs_wadr_bits_mask_T_16 | _io_bs_wadr_bits_mask_T_2; // @[SourceD.scala:275:{45,87}]
wire _io_bs_wadr_bits_mask_T_18 = _io_bs_wadr_bits_mask_T_17 | _io_bs_wadr_bits_mask_T_3; // @[SourceD.scala:275:{45,87}]
wire _io_bs_wadr_bits_mask_T_19 = _io_bs_wadr_bits_mask_T_18 | _io_bs_wadr_bits_mask_T_4; // @[SourceD.scala:275:{45,87}]
wire _io_bs_wadr_bits_mask_T_20 = _io_bs_wadr_bits_mask_T_19 | _io_bs_wadr_bits_mask_T_5; // @[SourceD.scala:275:{45,87}]
wire _io_bs_wadr_bits_mask_T_21 = _io_bs_wadr_bits_mask_T_20 | _io_bs_wadr_bits_mask_T_6; // @[SourceD.scala:275:{45,87}]
wire _io_bs_wadr_bits_mask_T_22 = _io_bs_wadr_bits_mask_T_21 | _io_bs_wadr_bits_mask_T_7; // @[SourceD.scala:275:{45,87}]
wire _io_bs_wadr_bits_mask_T_23 = _io_bs_wadr_bits_mask_T_8 | _io_bs_wadr_bits_mask_T_9; // @[SourceD.scala:275:{45,87}]
wire _io_bs_wadr_bits_mask_T_24 = _io_bs_wadr_bits_mask_T_23 | _io_bs_wadr_bits_mask_T_10; // @[SourceD.scala:275:{45,87}]
wire _io_bs_wadr_bits_mask_T_25 = _io_bs_wadr_bits_mask_T_24 | _io_bs_wadr_bits_mask_T_11; // @[SourceD.scala:275:{45,87}]
wire _io_bs_wadr_bits_mask_T_26 = _io_bs_wadr_bits_mask_T_25 | _io_bs_wadr_bits_mask_T_12; // @[SourceD.scala:275:{45,87}]
wire _io_bs_wadr_bits_mask_T_27 = _io_bs_wadr_bits_mask_T_26 | _io_bs_wadr_bits_mask_T_13; // @[SourceD.scala:275:{45,87}]
wire _io_bs_wadr_bits_mask_T_28 = _io_bs_wadr_bits_mask_T_27 | _io_bs_wadr_bits_mask_T_14; // @[SourceD.scala:275:{45,87}]
wire _io_bs_wadr_bits_mask_T_29 = _io_bs_wadr_bits_mask_T_28 | _io_bs_wadr_bits_mask_T_15; // @[SourceD.scala:275:{45,87}]
assign _io_bs_wadr_bits_mask_T_30 = {_io_bs_wadr_bits_mask_T_29, _io_bs_wadr_bits_mask_T_22}; // @[SourceD.scala:275:{30,87}]
assign io_bs_wadr_bits_mask_0 = _io_bs_wadr_bits_mask_T_30; // @[SourceD.scala:48:7, :275:30] |
Generate the Verilog code corresponding to the following Chisel files.
File primitives.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object lowMask
{
def apply(in: UInt, topBound: BigInt, bottomBound: BigInt): UInt =
{
require(topBound != bottomBound)
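// For topBound > bottomBound, the result is a (topBound - bottomBound)-bit thermometer mask:
// bit i (counting from the LSB) is set when in > bottomBound + i, i.e. the low
// (in - bottomBound) bits are ones, clamped to the mask width.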
val numInVals = BigInt(1)<<in.getWidth
if (topBound < bottomBound) {
lowMask(~in, numInVals - 1 - topBound, numInVals - 1 - bottomBound)
} else if (numInVals > 64 /* Empirical */) {
// For simulation performance, we should avoid generating
// extremely wide shifters, so we divide and conquer.
// Empirically, this does not impact synthesis QoR.
val mid = numInVals / 2
val msb = in(in.getWidth - 1)
val lsbs = in(in.getWidth - 2, 0)
if (mid < topBound) {
if (mid <= bottomBound) {
Mux(msb,
lowMask(lsbs, topBound - mid, bottomBound - mid),
0.U
)
} else {
Mux(msb,
lowMask(lsbs, topBound - mid, 0) ## ((BigInt(1)<<(mid - bottomBound).toInt) - 1).U,
lowMask(lsbs, mid, bottomBound)
)
}
} else {
~Mux(msb, 0.U, ~lowMask(lsbs, topBound, bottomBound))
}
} else {
val shift = (BigInt(-1)<<numInVals.toInt).S>>in
Reverse(
shift(
(numInVals - 1 - bottomBound).toInt,
(numInVals - topBound).toInt
)
)
}
}
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object countLeadingZeros
{
def apply(in: UInt): UInt = PriorityEncoder(in.asBools.reverse)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
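// orReduceBy2 and orReduceBy4 OR-reduce adjacent pairs (resp. groups of four) of input bits
// into single output bits, shrinking a mask or sticky vector to a coarser granularity.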
object orReduceBy2
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 1)>>1
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 2 + 1, ix * 2).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 2).orR
reducedVec.asUInt
}
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object orReduceBy4
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 3)>>2
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 4 + 3, ix * 4).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 4).orR
reducedVec.asUInt
}
}
File RoundAnyRawFNToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util.Fill
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundAnyRawFNToRecFN(
inExpWidth: Int,
inSigWidth: Int,
outExpWidth: Int,
outSigWidth: Int,
options: Int
)
extends RawModule
{
override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(inExpWidth, inSigWidth))
// (allowed exponent range has limits)
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0)
val effectiveInSigWidth =
if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1
val neverUnderflows =
((options &
(flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact)
) != 0) ||
(inExpWidth < outExpWidth)
val neverOverflows =
((options & flRoundOpt_neverOverflows) != 0) ||
(inExpWidth < outExpWidth)
val outNaNExp = BigInt(7)<<(outExpWidth - 2)
val outInfExp = BigInt(6)<<(outExpWidth - 2)
val outMaxFiniteExp = outInfExp - 1
val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2
val outMinNonzeroExp = outMinNormExp - outSigWidth + 1
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_near_even = (io.roundingMode === round_near_even)
val roundingMode_minMag = (io.roundingMode === round_minMag)
val roundingMode_min = (io.roundingMode === round_min)
val roundingMode_max = (io.roundingMode === round_max)
val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag)
val roundingMode_odd = (io.roundingMode === round_odd)
val roundMagUp =
(roundingMode_min && io.in.sign) || (roundingMode_max && ! io.in.sign)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sAdjustedExp =
if (inExpWidth < outExpWidth)
(io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
)(outExpWidth, 0).zext
else if (inExpWidth == outExpWidth)
io.in.sExp
else
io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
val adjustedSig =
if (inSigWidth <= outSigWidth + 2)
io.in.sig<<(outSigWidth - inSigWidth + 2)
else
(io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ##
io.in.sig(inSigWidth - outSigWidth - 2, 0).orR
)
val doShiftSigDown1 =
if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2)
val common_expOut = Wire(UInt((outExpWidth + 1).W))
val common_fractOut = Wire(UInt((outSigWidth - 1).W))
val common_overflow = Wire(Bool())
val common_totalUnderflow = Wire(Bool())
val common_underflow = Wire(Bool())
val common_inexact = Wire(Bool())
if (
neverOverflows && neverUnderflows
&& (effectiveInSigWidth <= outSigWidth)
) {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1
common_fractOut :=
Mux(doShiftSigDown1,
adjustedSig(outSigWidth + 1, 3),
adjustedSig(outSigWidth, 2)
)
common_overflow := false.B
common_totalUnderflow := false.B
common_underflow := false.B
common_inexact := false.B
} else {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
val roundMask =
if (neverUnderflows)
0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W)
else
(lowMask(
sAdjustedExp(outExpWidth, 0),
outMinNormExp - outSigWidth - 1,
outMinNormExp
) | doShiftSigDown1) ##
3.U(2.W)
val shiftedRoundMask = 0.U(1.W) ## roundMask>>1
val roundPosMask = ~shiftedRoundMask & roundMask
val roundPosBit = (adjustedSig & roundPosMask).orR
val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR
val anyRound = roundPosBit || anyRoundExtra
val roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
roundPosBit) ||
(roundMagUp && anyRound)
val roundedSig: Bits =
Mux(roundIncr,
(((adjustedSig | roundMask)>>2) +& 1.U) &
~Mux(roundingMode_near_even && roundPosBit &&
! anyRoundExtra,
roundMask>>1,
0.U((outSigWidth + 2).W)
),
(adjustedSig & ~roundMask)>>2 |
Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U)
)
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext
common_expOut := sRoundedExp(outExpWidth, 0)
common_fractOut :=
Mux(doShiftSigDown1,
roundedSig(outSigWidth - 1, 1),
roundedSig(outSigWidth - 2, 0)
)
common_overflow :=
(if (neverOverflows) false.B else
//*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?:
(sRoundedExp>>(outExpWidth - 1) >= 3.S))
common_totalUnderflow :=
(if (neverUnderflows) false.B else
//*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?:
(sRoundedExp < outMinNonzeroExp.S))
val unboundedRange_roundPosBit =
Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1))
val unboundedRange_anyRound =
(doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR
val unboundedRange_roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
unboundedRange_roundPosBit) ||
(roundMagUp && unboundedRange_anyRound)
val roundCarry =
Mux(doShiftSigDown1,
roundedSig(outSigWidth + 1),
roundedSig(outSigWidth)
)
common_underflow :=
(if (neverUnderflows) false.B else
common_totalUnderflow ||
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
(anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) &&
Mux(doShiftSigDown1, roundMask(3), roundMask(2)) &&
! ((io.detectTininess === tininess_afterRounding) &&
! Mux(doShiftSigDown1,
roundMask(4),
roundMask(3)
) &&
roundCarry && roundPosBit &&
unboundedRange_roundIncr)))
common_inexact := common_totalUnderflow || anyRound
}
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val isNaNOut = io.invalidExc || io.in.isNaN
val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf
val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero
val overflow = commonCase && common_overflow
val underflow = commonCase && common_underflow
val inexact = overflow || (commonCase && common_inexact)
val overflow_roundMagUp =
roundingMode_near_even || roundingMode_near_maxMag || roundMagUp
val pegMinNonzeroMagOut =
commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd)
val pegMaxFiniteMagOut = overflow && ! overflow_roundMagUp
val notNaN_isInfOut =
notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp)
val signOut = Mux(isNaNOut, false.B, io.in.sign)
val expOut =
(common_expOut &
~Mux(io.in.isZero || common_totalUnderflow,
(BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMinNonzeroMagOut,
~outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMaxFiniteMagOut,
(BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W),
0.U
) &
~Mux(notNaN_isInfOut,
(BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
)) |
Mux(pegMinNonzeroMagOut,
outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) |
Mux(pegMaxFiniteMagOut,
outMaxFiniteExp.U((outExpWidth + 1).W),
0.U
) |
Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) |
Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U)
val fractOut =
Mux(isNaNOut || io.in.isZero || common_totalUnderflow,
Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U),
common_fractOut
) |
Fill(outSigWidth - 1, pegMaxFiniteMagOut)
io.out := signOut ## expOut ## fractOut
io.exceptionFlags :=
io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int)
extends RawModule
{
override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(expWidth, sigWidth + 2))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
expWidth, sigWidth + 2, expWidth, sigWidth, options))
roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc
roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc
roundAnyRawFNToRecFN.io.in := io.in
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
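//----------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the original hardfloat file):
// rounding a raw single-precision result with round-to-nearest-even. Here
// 'someRawFloat' is a placeholder for a RawFloat(8, 26) value produced by an
// upstream arithmetic unit.
//
//   val rounder = Module(new RoundRawFNToRecFN(8, 24, 0))
//   rounder.io.invalidExc := false.B
//   rounder.io.infiniteExc := false.B
//   rounder.io.in := someRawFloat
//   rounder.io.roundingMode := consts.round_near_even
//   rounder.io.detectTininess := consts.tininess_afterRounding
//   // rounder.io.out is the 33-bit recoded result; rounder.io.exceptionFlags
//   // packs {invalidExc, infiniteExc, overflow, underflow, inexact}.
//----------------------------------------------------------------------------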
| module RoundAnyRawFNToRecFN_ie8_is26_oe8_os24_69( // @[RoundAnyRawFNToRecFN.scala:48:5]
input io_invalidExc, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_isNaN, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_isInf, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_isZero, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_sign, // @[RoundAnyRawFNToRecFN.scala:58:16]
input [9:0] io_in_sExp, // @[RoundAnyRawFNToRecFN.scala:58:16]
input [26:0] io_in_sig, // @[RoundAnyRawFNToRecFN.scala:58:16]
output [32:0] io_out, // @[RoundAnyRawFNToRecFN.scala:58:16]
output [4:0] io_exceptionFlags // @[RoundAnyRawFNToRecFN.scala:58:16]
);
wire io_invalidExc_0 = io_invalidExc; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_isNaN_0 = io_in_isNaN; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_isInf_0 = io_in_isInf; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_isZero_0 = io_in_isZero; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_sign_0 = io_in_sign; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [9:0] io_in_sExp_0 = io_in_sExp; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [26:0] io_in_sig_0 = io_in_sig; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [8:0] _expOut_T_4 = 9'h194; // @[RoundAnyRawFNToRecFN.scala:258:19]
wire [15:0] _roundMask_T_5 = 16'hFF; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_4 = 16'hFF00; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_10 = 16'hFF00; // @[primitives.scala:77:20]
wire [11:0] _roundMask_T_13 = 12'hFF; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_14 = 16'hFF0; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_15 = 16'hF0F; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_20 = 16'hF0F0; // @[primitives.scala:77:20]
wire [13:0] _roundMask_T_23 = 14'hF0F; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_24 = 16'h3C3C; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_25 = 16'h3333; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_30 = 16'hCCCC; // @[primitives.scala:77:20]
wire [14:0] _roundMask_T_33 = 15'h3333; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_34 = 16'h6666; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_35 = 16'h5555; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_40 = 16'hAAAA; // @[primitives.scala:77:20]
wire [25:0] _roundedSig_T_15 = 26'h0; // @[RoundAnyRawFNToRecFN.scala:181:24]
wire [8:0] _expOut_T_6 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:257:14, :261:14]
wire [8:0] _expOut_T_9 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:257:14, :261:14]
wire [8:0] _expOut_T_5 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:257:18]
wire [8:0] _expOut_T_8 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:261:18]
wire [8:0] _expOut_T_14 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:269:16]
wire [8:0] _expOut_T_16 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:273:16]
wire [22:0] _fractOut_T_4 = 23'h0; // @[RoundAnyRawFNToRecFN.scala:284:13]
wire io_detectTininess = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire roundingMode_near_even = 1'h1; // @[RoundAnyRawFNToRecFN.scala:90:53]
wire _roundIncr_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:169:38]
wire _unboundedRange_roundIncr_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:207:38]
wire _common_underflow_T_7 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:222:49]
wire _overflow_roundMagUp_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:243:32]
wire overflow_roundMagUp = 1'h1; // @[RoundAnyRawFNToRecFN.scala:243:60]
wire [2:0] io_roundingMode = 3'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_infiniteExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire roundingMode_minMag = 1'h0; // @[RoundAnyRawFNToRecFN.scala:91:53]
wire roundingMode_min = 1'h0; // @[RoundAnyRawFNToRecFN.scala:92:53]
wire roundingMode_max = 1'h0; // @[RoundAnyRawFNToRecFN.scala:93:53]
wire roundingMode_near_maxMag = 1'h0; // @[RoundAnyRawFNToRecFN.scala:94:53]
wire roundingMode_odd = 1'h0; // @[RoundAnyRawFNToRecFN.scala:95:53]
wire _roundMagUp_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:27]
wire _roundMagUp_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:63]
wire roundMagUp = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:42]
wire _roundIncr_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:171:29]
wire _roundedSig_T_13 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:181:42]
wire _unboundedRange_roundIncr_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:209:29]
wire _pegMinNonzeroMagOut_T_1 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:60]
wire pegMinNonzeroMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:45]
wire _pegMaxFiniteMagOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:246:42]
wire pegMaxFiniteMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:246:39]
wire notNaN_isSpecialInfOut = io_in_isInf_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :236:49]
wire [26:0] adjustedSig = io_in_sig_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :114:22]
wire [32:0] _io_out_T_1; // @[RoundAnyRawFNToRecFN.scala:286:33]
wire [4:0] _io_exceptionFlags_T_3; // @[RoundAnyRawFNToRecFN.scala:288:66]
wire [32:0] io_out_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [4:0] io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire _roundMagUp_T_1 = ~io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :98:66]
wire doShiftSigDown1 = adjustedSig[26]; // @[RoundAnyRawFNToRecFN.scala:114:22, :120:57]
wire [8:0] _common_expOut_T; // @[RoundAnyRawFNToRecFN.scala:187:37]
wire [8:0] common_expOut; // @[RoundAnyRawFNToRecFN.scala:122:31]
wire [22:0] _common_fractOut_T_2; // @[RoundAnyRawFNToRecFN.scala:189:16]
wire [22:0] common_fractOut; // @[RoundAnyRawFNToRecFN.scala:123:31]
wire _common_overflow_T_1; // @[RoundAnyRawFNToRecFN.scala:196:50]
wire common_overflow; // @[RoundAnyRawFNToRecFN.scala:124:37]
wire _common_totalUnderflow_T; // @[RoundAnyRawFNToRecFN.scala:200:31]
wire common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:125:37]
wire _common_underflow_T_18; // @[RoundAnyRawFNToRecFN.scala:217:40]
wire common_underflow; // @[RoundAnyRawFNToRecFN.scala:126:37]
wire _common_inexact_T; // @[RoundAnyRawFNToRecFN.scala:230:49]
wire common_inexact; // @[RoundAnyRawFNToRecFN.scala:127:37]
wire [8:0] _roundMask_T = io_in_sExp_0[8:0]; // @[RoundAnyRawFNToRecFN.scala:48:5, :156:37]
wire [8:0] _roundMask_T_1 = ~_roundMask_T; // @[primitives.scala:52:21]
wire roundMask_msb = _roundMask_T_1[8]; // @[primitives.scala:52:21, :58:25]
wire [7:0] roundMask_lsbs = _roundMask_T_1[7:0]; // @[primitives.scala:52:21, :59:26]
wire roundMask_msb_1 = roundMask_lsbs[7]; // @[primitives.scala:58:25, :59:26]
wire [6:0] roundMask_lsbs_1 = roundMask_lsbs[6:0]; // @[primitives.scala:59:26]
wire roundMask_msb_2 = roundMask_lsbs_1[6]; // @[primitives.scala:58:25, :59:26]
wire roundMask_msb_3 = roundMask_lsbs_1[6]; // @[primitives.scala:58:25, :59:26]
wire [5:0] roundMask_lsbs_2 = roundMask_lsbs_1[5:0]; // @[primitives.scala:59:26]
wire [5:0] roundMask_lsbs_3 = roundMask_lsbs_1[5:0]; // @[primitives.scala:59:26]
wire [64:0] roundMask_shift = $signed(65'sh10000000000000000 >>> roundMask_lsbs_2); // @[primitives.scala:59:26, :76:56]
wire [21:0] _roundMask_T_2 = roundMask_shift[63:42]; // @[primitives.scala:76:56, :78:22]
wire [15:0] _roundMask_T_3 = _roundMask_T_2[15:0]; // @[primitives.scala:77:20, :78:22]
wire [7:0] _roundMask_T_6 = _roundMask_T_3[15:8]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_7 = {8'h0, _roundMask_T_6}; // @[primitives.scala:77:20]
wire [7:0] _roundMask_T_8 = _roundMask_T_3[7:0]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_9 = {_roundMask_T_8, 8'h0}; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_11 = _roundMask_T_9 & 16'hFF00; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_12 = _roundMask_T_7 | _roundMask_T_11; // @[primitives.scala:77:20]
wire [11:0] _roundMask_T_16 = _roundMask_T_12[15:4]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_17 = {4'h0, _roundMask_T_16 & 12'hF0F}; // @[primitives.scala:77:20]
wire [11:0] _roundMask_T_18 = _roundMask_T_12[11:0]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_19 = {_roundMask_T_18, 4'h0}; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_21 = _roundMask_T_19 & 16'hF0F0; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_22 = _roundMask_T_17 | _roundMask_T_21; // @[primitives.scala:77:20]
wire [13:0] _roundMask_T_26 = _roundMask_T_22[15:2]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_27 = {2'h0, _roundMask_T_26 & 14'h3333}; // @[primitives.scala:77:20]
wire [13:0] _roundMask_T_28 = _roundMask_T_22[13:0]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_29 = {_roundMask_T_28, 2'h0}; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_31 = _roundMask_T_29 & 16'hCCCC; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_32 = _roundMask_T_27 | _roundMask_T_31; // @[primitives.scala:77:20]
wire [14:0] _roundMask_T_36 = _roundMask_T_32[15:1]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_37 = {1'h0, _roundMask_T_36 & 15'h5555}; // @[primitives.scala:77:20]
wire [14:0] _roundMask_T_38 = _roundMask_T_32[14:0]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_39 = {_roundMask_T_38, 1'h0}; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_41 = _roundMask_T_39 & 16'hAAAA; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_42 = _roundMask_T_37 | _roundMask_T_41; // @[primitives.scala:77:20]
wire [5:0] _roundMask_T_43 = _roundMask_T_2[21:16]; // @[primitives.scala:77:20, :78:22]
wire [3:0] _roundMask_T_44 = _roundMask_T_43[3:0]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_45 = _roundMask_T_44[1:0]; // @[primitives.scala:77:20]
wire _roundMask_T_46 = _roundMask_T_45[0]; // @[primitives.scala:77:20]
wire _roundMask_T_47 = _roundMask_T_45[1]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_48 = {_roundMask_T_46, _roundMask_T_47}; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_49 = _roundMask_T_44[3:2]; // @[primitives.scala:77:20]
wire _roundMask_T_50 = _roundMask_T_49[0]; // @[primitives.scala:77:20]
wire _roundMask_T_51 = _roundMask_T_49[1]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_52 = {_roundMask_T_50, _roundMask_T_51}; // @[primitives.scala:77:20]
wire [3:0] _roundMask_T_53 = {_roundMask_T_48, _roundMask_T_52}; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_54 = _roundMask_T_43[5:4]; // @[primitives.scala:77:20]
wire _roundMask_T_55 = _roundMask_T_54[0]; // @[primitives.scala:77:20]
wire _roundMask_T_56 = _roundMask_T_54[1]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_57 = {_roundMask_T_55, _roundMask_T_56}; // @[primitives.scala:77:20]
wire [5:0] _roundMask_T_58 = {_roundMask_T_53, _roundMask_T_57}; // @[primitives.scala:77:20]
wire [21:0] _roundMask_T_59 = {_roundMask_T_42, _roundMask_T_58}; // @[primitives.scala:77:20]
wire [21:0] _roundMask_T_60 = ~_roundMask_T_59; // @[primitives.scala:73:32, :77:20]
wire [21:0] _roundMask_T_61 = roundMask_msb_2 ? 22'h0 : _roundMask_T_60; // @[primitives.scala:58:25, :73:{21,32}]
wire [21:0] _roundMask_T_62 = ~_roundMask_T_61; // @[primitives.scala:73:{17,21}]
wire [24:0] _roundMask_T_63 = {_roundMask_T_62, 3'h7}; // @[primitives.scala:68:58, :73:17]
wire [64:0] roundMask_shift_1 = $signed(65'sh10000000000000000 >>> roundMask_lsbs_3); // @[primitives.scala:59:26, :76:56]
wire [2:0] _roundMask_T_64 = roundMask_shift_1[2:0]; // @[primitives.scala:76:56, :78:22]
wire [1:0] _roundMask_T_65 = _roundMask_T_64[1:0]; // @[primitives.scala:77:20, :78:22]
wire _roundMask_T_66 = _roundMask_T_65[0]; // @[primitives.scala:77:20]
wire _roundMask_T_67 = _roundMask_T_65[1]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_68 = {_roundMask_T_66, _roundMask_T_67}; // @[primitives.scala:77:20]
wire _roundMask_T_69 = _roundMask_T_64[2]; // @[primitives.scala:77:20, :78:22]
wire [2:0] _roundMask_T_70 = {_roundMask_T_68, _roundMask_T_69}; // @[primitives.scala:77:20]
wire [2:0] _roundMask_T_71 = roundMask_msb_3 ? _roundMask_T_70 : 3'h0; // @[primitives.scala:58:25, :62:24, :77:20]
wire [24:0] _roundMask_T_72 = roundMask_msb_1 ? _roundMask_T_63 : {22'h0, _roundMask_T_71}; // @[primitives.scala:58:25, :62:24, :67:24, :68:58]
wire [24:0] _roundMask_T_73 = roundMask_msb ? _roundMask_T_72 : 25'h0; // @[primitives.scala:58:25, :62:24, :67:24]
wire [24:0] _roundMask_T_74 = {_roundMask_T_73[24:1], _roundMask_T_73[0] | doShiftSigDown1}; // @[primitives.scala:62:24]
wire [26:0] roundMask = {_roundMask_T_74, 2'h3}; // @[RoundAnyRawFNToRecFN.scala:159:{23,42}]
wire [27:0] _shiftedRoundMask_T = {1'h0, roundMask}; // @[RoundAnyRawFNToRecFN.scala:159:42, :162:41]
wire [26:0] shiftedRoundMask = _shiftedRoundMask_T[27:1]; // @[RoundAnyRawFNToRecFN.scala:162:{41,53}]
wire [26:0] _roundPosMask_T = ~shiftedRoundMask; // @[RoundAnyRawFNToRecFN.scala:162:53, :163:28]
wire [26:0] roundPosMask = _roundPosMask_T & roundMask; // @[RoundAnyRawFNToRecFN.scala:159:42, :163:{28,46}]
wire [26:0] _roundPosBit_T = adjustedSig & roundPosMask; // @[RoundAnyRawFNToRecFN.scala:114:22, :163:46, :164:40]
wire roundPosBit = |_roundPosBit_T; // @[RoundAnyRawFNToRecFN.scala:164:{40,56}]
wire _roundIncr_T_1 = roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :169:67]
wire _roundedSig_T_3 = roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :175:49]
wire [26:0] _anyRoundExtra_T = adjustedSig & shiftedRoundMask; // @[RoundAnyRawFNToRecFN.scala:114:22, :162:53, :165:42]
wire anyRoundExtra = |_anyRoundExtra_T; // @[RoundAnyRawFNToRecFN.scala:165:{42,62}]
wire anyRound = roundPosBit | anyRoundExtra; // @[RoundAnyRawFNToRecFN.scala:164:56, :165:62, :166:36]
wire roundIncr = _roundIncr_T_1; // @[RoundAnyRawFNToRecFN.scala:169:67, :170:31]
wire [26:0] _roundedSig_T = adjustedSig | roundMask; // @[RoundAnyRawFNToRecFN.scala:114:22, :159:42, :174:32]
wire [24:0] _roundedSig_T_1 = _roundedSig_T[26:2]; // @[RoundAnyRawFNToRecFN.scala:174:{32,44}]
wire [25:0] _roundedSig_T_2 = {1'h0, _roundedSig_T_1} + 26'h1; // @[RoundAnyRawFNToRecFN.scala:174:{44,49}]
wire _roundedSig_T_4 = ~anyRoundExtra; // @[RoundAnyRawFNToRecFN.scala:165:62, :176:30]
wire _roundedSig_T_5 = _roundedSig_T_3 & _roundedSig_T_4; // @[RoundAnyRawFNToRecFN.scala:175:{49,64}, :176:30]
wire [25:0] _roundedSig_T_6 = roundMask[26:1]; // @[RoundAnyRawFNToRecFN.scala:159:42, :177:35]
wire [25:0] _roundedSig_T_7 = _roundedSig_T_5 ? _roundedSig_T_6 : 26'h0; // @[RoundAnyRawFNToRecFN.scala:175:{25,64}, :177:35]
wire [25:0] _roundedSig_T_8 = ~_roundedSig_T_7; // @[RoundAnyRawFNToRecFN.scala:175:{21,25}]
wire [25:0] _roundedSig_T_9 = _roundedSig_T_2 & _roundedSig_T_8; // @[RoundAnyRawFNToRecFN.scala:174:{49,57}, :175:21]
wire [26:0] _roundedSig_T_10 = ~roundMask; // @[RoundAnyRawFNToRecFN.scala:159:42, :180:32]
wire [26:0] _roundedSig_T_11 = adjustedSig & _roundedSig_T_10; // @[RoundAnyRawFNToRecFN.scala:114:22, :180:{30,32}]
wire [24:0] _roundedSig_T_12 = _roundedSig_T_11[26:2]; // @[RoundAnyRawFNToRecFN.scala:180:{30,43}]
wire [25:0] _roundedSig_T_14 = roundPosMask[26:1]; // @[RoundAnyRawFNToRecFN.scala:163:46, :181:67]
wire [25:0] _roundedSig_T_16 = {1'h0, _roundedSig_T_12}; // @[RoundAnyRawFNToRecFN.scala:180:{43,47}]
wire [25:0] roundedSig = roundIncr ? _roundedSig_T_9 : _roundedSig_T_16; // @[RoundAnyRawFNToRecFN.scala:170:31, :173:16, :174:57, :180:47]
wire [1:0] _sRoundedExp_T = roundedSig[25:24]; // @[RoundAnyRawFNToRecFN.scala:173:16, :185:54]
wire [2:0] _sRoundedExp_T_1 = {1'h0, _sRoundedExp_T}; // @[RoundAnyRawFNToRecFN.scala:185:{54,76}]
wire [10:0] sRoundedExp = {io_in_sExp_0[9], io_in_sExp_0} + {{8{_sRoundedExp_T_1[2]}}, _sRoundedExp_T_1}; // @[RoundAnyRawFNToRecFN.scala:48:5, :185:{40,76}]
assign _common_expOut_T = sRoundedExp[8:0]; // @[RoundAnyRawFNToRecFN.scala:185:40, :187:37]
assign common_expOut = _common_expOut_T; // @[RoundAnyRawFNToRecFN.scala:122:31, :187:37]
wire [22:0] _common_fractOut_T = roundedSig[23:1]; // @[RoundAnyRawFNToRecFN.scala:173:16, :190:27]
wire [22:0] _common_fractOut_T_1 = roundedSig[22:0]; // @[RoundAnyRawFNToRecFN.scala:173:16, :191:27]
assign _common_fractOut_T_2 = doShiftSigDown1 ? _common_fractOut_T : _common_fractOut_T_1; // @[RoundAnyRawFNToRecFN.scala:120:57, :189:16, :190:27, :191:27]
assign common_fractOut = _common_fractOut_T_2; // @[RoundAnyRawFNToRecFN.scala:123:31, :189:16]
wire [3:0] _common_overflow_T = sRoundedExp[10:7]; // @[RoundAnyRawFNToRecFN.scala:185:40, :196:30]
assign _common_overflow_T_1 = $signed(_common_overflow_T) > 4'sh2; // @[RoundAnyRawFNToRecFN.scala:196:{30,50}]
assign common_overflow = _common_overflow_T_1; // @[RoundAnyRawFNToRecFN.scala:124:37, :196:50]
assign _common_totalUnderflow_T = $signed(sRoundedExp) < 11'sh6B; // @[RoundAnyRawFNToRecFN.scala:185:40, :200:31]
assign common_totalUnderflow = _common_totalUnderflow_T; // @[RoundAnyRawFNToRecFN.scala:125:37, :200:31]
wire _unboundedRange_roundPosBit_T = adjustedSig[2]; // @[RoundAnyRawFNToRecFN.scala:114:22, :203:45]
wire _unboundedRange_anyRound_T = adjustedSig[2]; // @[RoundAnyRawFNToRecFN.scala:114:22, :203:45, :205:44]
wire _unboundedRange_roundPosBit_T_1 = adjustedSig[1]; // @[RoundAnyRawFNToRecFN.scala:114:22, :203:61]
wire unboundedRange_roundPosBit = doShiftSigDown1 ? _unboundedRange_roundPosBit_T : _unboundedRange_roundPosBit_T_1; // @[RoundAnyRawFNToRecFN.scala:120:57, :203:{16,45,61}]
wire _unboundedRange_roundIncr_T_1 = unboundedRange_roundPosBit; // @[RoundAnyRawFNToRecFN.scala:203:16, :207:67]
wire _unboundedRange_anyRound_T_1 = doShiftSigDown1 & _unboundedRange_anyRound_T; // @[RoundAnyRawFNToRecFN.scala:120:57, :205:{30,44}]
wire [1:0] _unboundedRange_anyRound_T_2 = adjustedSig[1:0]; // @[RoundAnyRawFNToRecFN.scala:114:22, :205:63]
wire _unboundedRange_anyRound_T_3 = |_unboundedRange_anyRound_T_2; // @[RoundAnyRawFNToRecFN.scala:205:{63,70}]
wire unboundedRange_anyRound = _unboundedRange_anyRound_T_1 | _unboundedRange_anyRound_T_3; // @[RoundAnyRawFNToRecFN.scala:205:{30,49,70}]
wire unboundedRange_roundIncr = _unboundedRange_roundIncr_T_1; // @[RoundAnyRawFNToRecFN.scala:207:67, :208:46]
wire _roundCarry_T = roundedSig[25]; // @[RoundAnyRawFNToRecFN.scala:173:16, :212:27]
wire _roundCarry_T_1 = roundedSig[24]; // @[RoundAnyRawFNToRecFN.scala:173:16, :213:27]
wire roundCarry = doShiftSigDown1 ? _roundCarry_T : _roundCarry_T_1; // @[RoundAnyRawFNToRecFN.scala:120:57, :211:16, :212:27, :213:27]
wire [1:0] _common_underflow_T = io_in_sExp_0[9:8]; // @[RoundAnyRawFNToRecFN.scala:48:5, :220:49]
wire _common_underflow_T_1 = _common_underflow_T != 2'h1; // @[RoundAnyRawFNToRecFN.scala:220:{49,64}]
wire _common_underflow_T_2 = anyRound & _common_underflow_T_1; // @[RoundAnyRawFNToRecFN.scala:166:36, :220:{32,64}]
wire _common_underflow_T_3 = roundMask[3]; // @[RoundAnyRawFNToRecFN.scala:159:42, :221:57]
wire _common_underflow_T_9 = roundMask[3]; // @[RoundAnyRawFNToRecFN.scala:159:42, :221:57, :225:49]
wire _common_underflow_T_4 = roundMask[2]; // @[RoundAnyRawFNToRecFN.scala:159:42, :221:71]
wire _common_underflow_T_5 = doShiftSigDown1 ? _common_underflow_T_3 : _common_underflow_T_4; // @[RoundAnyRawFNToRecFN.scala:120:57, :221:{30,57,71}]
wire _common_underflow_T_6 = _common_underflow_T_2 & _common_underflow_T_5; // @[RoundAnyRawFNToRecFN.scala:220:{32,72}, :221:30]
wire _common_underflow_T_8 = roundMask[4]; // @[RoundAnyRawFNToRecFN.scala:159:42, :224:49]
wire _common_underflow_T_10 = doShiftSigDown1 ? _common_underflow_T_8 : _common_underflow_T_9; // @[RoundAnyRawFNToRecFN.scala:120:57, :223:39, :224:49, :225:49]
wire _common_underflow_T_11 = ~_common_underflow_T_10; // @[RoundAnyRawFNToRecFN.scala:223:{34,39}]
wire _common_underflow_T_12 = _common_underflow_T_11; // @[RoundAnyRawFNToRecFN.scala:222:77, :223:34]
wire _common_underflow_T_13 = _common_underflow_T_12 & roundCarry; // @[RoundAnyRawFNToRecFN.scala:211:16, :222:77, :226:38]
wire _common_underflow_T_14 = _common_underflow_T_13 & roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :226:38, :227:45]
wire _common_underflow_T_15 = _common_underflow_T_14 & unboundedRange_roundIncr; // @[RoundAnyRawFNToRecFN.scala:208:46, :227:{45,60}]
wire _common_underflow_T_16 = ~_common_underflow_T_15; // @[RoundAnyRawFNToRecFN.scala:222:27, :227:60]
wire _common_underflow_T_17 = _common_underflow_T_6 & _common_underflow_T_16; // @[RoundAnyRawFNToRecFN.scala:220:72, :221:76, :222:27]
assign _common_underflow_T_18 = common_totalUnderflow | _common_underflow_T_17; // @[RoundAnyRawFNToRecFN.scala:125:37, :217:40, :221:76]
assign common_underflow = _common_underflow_T_18; // @[RoundAnyRawFNToRecFN.scala:126:37, :217:40]
assign _common_inexact_T = common_totalUnderflow | anyRound; // @[RoundAnyRawFNToRecFN.scala:125:37, :166:36, :230:49]
assign common_inexact = _common_inexact_T; // @[RoundAnyRawFNToRecFN.scala:127:37, :230:49]
wire isNaNOut = io_invalidExc_0 | io_in_isNaN_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :235:34]
wire _commonCase_T = ~isNaNOut; // @[RoundAnyRawFNToRecFN.scala:235:34, :237:22]
wire _commonCase_T_1 = ~notNaN_isSpecialInfOut; // @[RoundAnyRawFNToRecFN.scala:236:49, :237:36]
wire _commonCase_T_2 = _commonCase_T & _commonCase_T_1; // @[RoundAnyRawFNToRecFN.scala:237:{22,33,36}]
wire _commonCase_T_3 = ~io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :237:64]
wire commonCase = _commonCase_T_2 & _commonCase_T_3; // @[RoundAnyRawFNToRecFN.scala:237:{33,61,64}]
wire overflow = commonCase & common_overflow; // @[RoundAnyRawFNToRecFN.scala:124:37, :237:61, :238:32]
wire _notNaN_isInfOut_T = overflow; // @[RoundAnyRawFNToRecFN.scala:238:32, :248:45]
wire underflow = commonCase & common_underflow; // @[RoundAnyRawFNToRecFN.scala:126:37, :237:61, :239:32]
wire _inexact_T = commonCase & common_inexact; // @[RoundAnyRawFNToRecFN.scala:127:37, :237:61, :240:43]
wire inexact = overflow | _inexact_T; // @[RoundAnyRawFNToRecFN.scala:238:32, :240:{28,43}]
wire _pegMinNonzeroMagOut_T = commonCase & common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:125:37, :237:61, :245:20]
wire notNaN_isInfOut = notNaN_isSpecialInfOut | _notNaN_isInfOut_T; // @[RoundAnyRawFNToRecFN.scala:236:49, :248:{32,45}]
wire signOut = ~isNaNOut & io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :235:34, :250:22]
wire _expOut_T = io_in_isZero_0 | common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:48:5, :125:37, :253:32]
wire [8:0] _expOut_T_1 = _expOut_T ? 9'h1C0 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:{18,32}]
wire [8:0] _expOut_T_2 = ~_expOut_T_1; // @[RoundAnyRawFNToRecFN.scala:253:{14,18}]
wire [8:0] _expOut_T_3 = common_expOut & _expOut_T_2; // @[RoundAnyRawFNToRecFN.scala:122:31, :252:24, :253:14]
wire [8:0] _expOut_T_7 = _expOut_T_3; // @[RoundAnyRawFNToRecFN.scala:252:24, :256:17]
wire [8:0] _expOut_T_10 = _expOut_T_7; // @[RoundAnyRawFNToRecFN.scala:256:17, :260:17]
wire [8:0] _expOut_T_11 = {2'h0, notNaN_isInfOut, 6'h0}; // @[RoundAnyRawFNToRecFN.scala:248:32, :265:18]
wire [8:0] _expOut_T_12 = ~_expOut_T_11; // @[RoundAnyRawFNToRecFN.scala:265:{14,18}]
wire [8:0] _expOut_T_13 = _expOut_T_10 & _expOut_T_12; // @[RoundAnyRawFNToRecFN.scala:260:17, :264:17, :265:14]
wire [8:0] _expOut_T_15 = _expOut_T_13; // @[RoundAnyRawFNToRecFN.scala:264:17, :268:18]
wire [8:0] _expOut_T_17 = _expOut_T_15; // @[RoundAnyRawFNToRecFN.scala:268:18, :272:15]
wire [8:0] _expOut_T_18 = notNaN_isInfOut ? 9'h180 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:248:32, :277:16]
wire [8:0] _expOut_T_19 = _expOut_T_17 | _expOut_T_18; // @[RoundAnyRawFNToRecFN.scala:272:15, :276:15, :277:16]
wire [8:0] _expOut_T_20 = isNaNOut ? 9'h1C0 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:235:34, :278:16]
wire [8:0] expOut = _expOut_T_19 | _expOut_T_20; // @[RoundAnyRawFNToRecFN.scala:276:15, :277:73, :278:16]
wire _fractOut_T = isNaNOut | io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :235:34, :280:22]
wire _fractOut_T_1 = _fractOut_T | common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:125:37, :280:{22,38}]
wire [22:0] _fractOut_T_2 = {isNaNOut, 22'h0}; // @[RoundAnyRawFNToRecFN.scala:235:34, :281:16]
wire [22:0] _fractOut_T_3 = _fractOut_T_1 ? _fractOut_T_2 : common_fractOut; // @[RoundAnyRawFNToRecFN.scala:123:31, :280:{12,38}, :281:16]
wire [22:0] fractOut = _fractOut_T_3; // @[RoundAnyRawFNToRecFN.scala:280:12, :283:11]
wire [9:0] _io_out_T = {signOut, expOut}; // @[RoundAnyRawFNToRecFN.scala:250:22, :277:73, :286:23]
assign _io_out_T_1 = {_io_out_T, fractOut}; // @[RoundAnyRawFNToRecFN.scala:283:11, :286:{23,33}]
assign io_out_0 = _io_out_T_1; // @[RoundAnyRawFNToRecFN.scala:48:5, :286:33]
wire [1:0] _io_exceptionFlags_T = {io_invalidExc_0, 1'h0}; // @[RoundAnyRawFNToRecFN.scala:48:5, :288:23]
wire [2:0] _io_exceptionFlags_T_1 = {_io_exceptionFlags_T, overflow}; // @[RoundAnyRawFNToRecFN.scala:238:32, :288:{23,41}]
wire [3:0] _io_exceptionFlags_T_2 = {_io_exceptionFlags_T_1, underflow}; // @[RoundAnyRawFNToRecFN.scala:239:32, :288:{41,53}]
assign _io_exceptionFlags_T_3 = {_io_exceptionFlags_T_2, inexact}; // @[RoundAnyRawFNToRecFN.scala:240:28, :288:{53,66}]
assign io_exceptionFlags_0 = _io_exceptionFlags_T_3; // @[RoundAnyRawFNToRecFN.scala:48:5, :288:66]
assign io_out = io_out_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
assign io_exceptionFlags = io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File FPU.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tile
import chisel3._
import chisel3.util._
import chisel3.{DontCare, WireInit, withClock, withReset}
import chisel3.experimental.SourceInfo
import chisel3.experimental.dataview._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.rocket._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.util._
import freechips.rocketchip.util.property
case class FPUParams(
minFLen: Int = 32,
fLen: Int = 64,
divSqrt: Boolean = true,
sfmaLatency: Int = 3,
dfmaLatency: Int = 4,
fpmuLatency: Int = 2,
ifpuLatency: Int = 2
)
object FPConstants
{
val RM_SZ = 3
val FLAGS_SZ = 5
}
trait HasFPUCtrlSigs {
val ldst = Bool()
val wen = Bool()
val ren1 = Bool()
val ren2 = Bool()
val ren3 = Bool()
val swap12 = Bool()
val swap23 = Bool()
val typeTagIn = UInt(2.W)
val typeTagOut = UInt(2.W)
val fromint = Bool()
val toint = Bool()
val fastpipe = Bool()
val fma = Bool()
val div = Bool()
val sqrt = Bool()
val wflags = Bool()
val vec = Bool()
}
class FPUCtrlSigs extends Bundle with HasFPUCtrlSigs
class FPUDecoder(implicit p: Parameters) extends FPUModule()(p) {
val io = IO(new Bundle {
val inst = Input(Bits(32.W))
val sigs = Output(new FPUCtrlSigs())
})
private val X2 = BitPat.dontCare(2)
val default = List(X,X,X,X,X,X,X,X2,X2,X,X,X,X,X,X,X,N)
val h: Array[(BitPat, List[BitPat])] =
Array(FLH -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N),
FSH -> List(Y,N,N,Y,N,Y,X, I, H,N,Y,N,N,N,N,N,N),
FMV_H_X -> List(N,Y,N,N,N,X,X, H, I,Y,N,N,N,N,N,N,N),
FCVT_H_W -> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
FCVT_H_WU-> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
FCVT_H_L -> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
FCVT_H_LU-> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
FMV_X_H -> List(N,N,Y,N,N,N,X, I, H,N,Y,N,N,N,N,N,N),
FCLASS_H -> List(N,N,Y,N,N,N,X, H, H,N,Y,N,N,N,N,N,N),
FCVT_W_H -> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
FCVT_WU_H-> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
FCVT_L_H -> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
FCVT_LU_H-> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
FCVT_S_H -> List(N,Y,Y,N,N,N,X, H, S,N,N,Y,N,N,N,Y,N),
FCVT_H_S -> List(N,Y,Y,N,N,N,X, S, H,N,N,Y,N,N,N,Y,N),
FEQ_H -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N),
FLT_H -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N),
FLE_H -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N),
FSGNJ_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N),
FSGNJN_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N),
FSGNJX_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N),
FMIN_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,Y,N),
FMAX_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,Y,N),
FADD_H -> List(N,Y,Y,Y,N,N,Y, H, H,N,N,N,Y,N,N,Y,N),
FSUB_H -> List(N,Y,Y,Y,N,N,Y, H, H,N,N,N,Y,N,N,Y,N),
FMUL_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,N,Y,N,N,Y,N),
FMADD_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
FMSUB_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
FNMADD_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
FNMSUB_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
FDIV_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,N,N,Y,N,Y,N),
FSQRT_H -> List(N,Y,Y,N,N,N,X, H, H,N,N,N,N,N,Y,Y,N))
val f: Array[(BitPat, List[BitPat])] =
Array(FLW -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N),
FSW -> List(Y,N,N,Y,N,Y,X, I, S,N,Y,N,N,N,N,N,N),
FMV_W_X -> List(N,Y,N,N,N,X,X, S, I,Y,N,N,N,N,N,N,N),
FCVT_S_W -> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
FCVT_S_WU-> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
FCVT_S_L -> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
FCVT_S_LU-> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
FMV_X_W -> List(N,N,Y,N,N,N,X, I, S,N,Y,N,N,N,N,N,N),
FCLASS_S -> List(N,N,Y,N,N,N,X, S, S,N,Y,N,N,N,N,N,N),
FCVT_W_S -> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
FCVT_WU_S-> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
FCVT_L_S -> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
FCVT_LU_S-> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
FEQ_S -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N),
FLT_S -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N),
FLE_S -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N),
FSGNJ_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N),
FSGNJN_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N),
FSGNJX_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N),
FMIN_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,Y,N),
FMAX_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,Y,N),
FADD_S -> List(N,Y,Y,Y,N,N,Y, S, S,N,N,N,Y,N,N,Y,N),
FSUB_S -> List(N,Y,Y,Y,N,N,Y, S, S,N,N,N,Y,N,N,Y,N),
FMUL_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,N,Y,N,N,Y,N),
FMADD_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
FMSUB_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
FNMADD_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
FNMSUB_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
FDIV_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,N,N,Y,N,Y,N),
FSQRT_S -> List(N,Y,Y,N,N,N,X, S, S,N,N,N,N,N,Y,Y,N))
val d: Array[(BitPat, List[BitPat])] =
Array(FLD -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N),
FSD -> List(Y,N,N,Y,N,Y,X, I, D,N,Y,N,N,N,N,N,N),
FMV_D_X -> List(N,Y,N,N,N,X,X, D, I,Y,N,N,N,N,N,N,N),
FCVT_D_W -> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
FCVT_D_WU-> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
FCVT_D_L -> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
FCVT_D_LU-> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
FMV_X_D -> List(N,N,Y,N,N,N,X, I, D,N,Y,N,N,N,N,N,N),
FCLASS_D -> List(N,N,Y,N,N,N,X, D, D,N,Y,N,N,N,N,N,N),
FCVT_W_D -> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
FCVT_WU_D-> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
FCVT_L_D -> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
FCVT_LU_D-> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
FCVT_S_D -> List(N,Y,Y,N,N,N,X, D, S,N,N,Y,N,N,N,Y,N),
FCVT_D_S -> List(N,Y,Y,N,N,N,X, S, D,N,N,Y,N,N,N,Y,N),
FEQ_D -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N),
FLT_D -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N),
FLE_D -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N),
FSGNJ_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N),
FSGNJN_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N),
FSGNJX_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N),
FMIN_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,Y,N),
FMAX_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,Y,N),
FADD_D -> List(N,Y,Y,Y,N,N,Y, D, D,N,N,N,Y,N,N,Y,N),
FSUB_D -> List(N,Y,Y,Y,N,N,Y, D, D,N,N,N,Y,N,N,Y,N),
FMUL_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,N,Y,N,N,Y,N),
FMADD_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
FMSUB_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
FNMADD_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
FNMSUB_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
FDIV_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,N,N,Y,N,Y,N),
FSQRT_D -> List(N,Y,Y,N,N,N,X, D, D,N,N,N,N,N,Y,Y,N))
val fcvt_hd: Array[(BitPat, List[BitPat])] =
Array(FCVT_H_D -> List(N,Y,Y,N,N,N,X, D, H,N,N,Y,N,N,N,Y,N),
FCVT_D_H -> List(N,Y,Y,N,N,N,X, H, D,N,N,Y,N,N,N,Y,N))
val vfmv_f_s: Array[(BitPat, List[BitPat])] =
Array(VFMV_F_S -> List(N,Y,N,N,N,N,X,X2,X2,N,N,N,N,N,N,N,Y))
val insns = ((minFLen, fLen) match {
case (32, 32) => f
case (16, 32) => h ++ f
case (32, 64) => f ++ d
case (16, 64) => h ++ f ++ d ++ fcvt_hd
case other => throw new Exception(s"minFLen = ${minFLen} & fLen = ${fLen} is an unsupported configuration")
}) ++ (if (usingVector) vfmv_f_s else Array[(BitPat, List[BitPat])]())
val decoder = DecodeLogic(io.inst, default, insns)
val s = io.sigs
val sigs = Seq(s.ldst, s.wen, s.ren1, s.ren2, s.ren3, s.swap12,
s.swap23, s.typeTagIn, s.typeTagOut, s.fromint, s.toint,
s.fastpipe, s.fma, s.div, s.sqrt, s.wflags, s.vec)
sigs zip decoder map {case(s,d) => s := d}
}
class FPUCoreIO(implicit p: Parameters) extends CoreBundle()(p) {
val hartid = Input(UInt(hartIdLen.W))
val time = Input(UInt(xLen.W))
val inst = Input(Bits(32.W))
val fromint_data = Input(Bits(xLen.W))
val fcsr_rm = Input(Bits(FPConstants.RM_SZ.W))
val fcsr_flags = Valid(Bits(FPConstants.FLAGS_SZ.W))
val v_sew = Input(UInt(3.W))
val store_data = Output(Bits(fLen.W))
val toint_data = Output(Bits(xLen.W))
val ll_resp_val = Input(Bool())
val ll_resp_type = Input(Bits(3.W))
val ll_resp_tag = Input(UInt(5.W))
val ll_resp_data = Input(Bits(fLen.W))
val valid = Input(Bool())
val fcsr_rdy = Output(Bool())
val nack_mem = Output(Bool())
val illegal_rm = Output(Bool())
val killx = Input(Bool())
val killm = Input(Bool())
val dec = Output(new FPUCtrlSigs())
val sboard_set = Output(Bool())
val sboard_clr = Output(Bool())
val sboard_clra = Output(UInt(5.W))
val keep_clock_enabled = Input(Bool())
}
class FPUIO(implicit p: Parameters) extends FPUCoreIO ()(p) {
val cp_req = Flipped(Decoupled(new FPInput())) //cp doesn't pay attention to kill signals
val cp_resp = Decoupled(new FPResult())
}
class FPResult(implicit p: Parameters) extends CoreBundle()(p) {
val data = Bits((fLen+1).W)
val exc = Bits(FPConstants.FLAGS_SZ.W)
}
class IntToFPInput(implicit p: Parameters) extends CoreBundle()(p) with HasFPUCtrlSigs {
val rm = Bits(FPConstants.RM_SZ.W)
val typ = Bits(2.W)
val in1 = Bits(xLen.W)
}
class FPInput(implicit p: Parameters) extends CoreBundle()(p) with HasFPUCtrlSigs {
val rm = Bits(FPConstants.RM_SZ.W)
val fmaCmd = Bits(2.W)
val typ = Bits(2.W)
val fmt = Bits(2.W)
val in1 = Bits((fLen+1).W)
val in2 = Bits((fLen+1).W)
val in3 = Bits((fLen+1).W)
}
case class FType(exp: Int, sig: Int) {
def ieeeWidth = exp + sig
def recodedWidth = ieeeWidth + 1
def ieeeQNaN = ((BigInt(1) << (ieeeWidth - 1)) - (BigInt(1) << (sig - 2))).U(ieeeWidth.W)
def qNaN = ((BigInt(7) << (exp + sig - 3)) + (BigInt(1) << (sig - 2))).U(recodedWidth.W)
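// Worked example (illustrative, not in the original source): for single
// precision, FType(8, 24), ieeeWidth = 32 and recodedWidth = 33;
// ieeeQNaN = 2^31 - 2^22 = 0x7FC00000 (the canonical IEEE qNaN), and the
// recoded qNaN constant is (7 << 29) + (1 << 22) = 0xE0400000 with the
// most-significant bit of the 33-bit encoding clear.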
def isNaN(x: UInt) = x(sig + exp - 1, sig + exp - 3).andR
def isSNaN(x: UInt) = isNaN(x) && !x(sig - 2)
def classify(x: UInt) = {
val sign = x(sig + exp)
val code = x(exp + sig - 1, exp + sig - 3)
val codeHi = code(2, 1)
val isSpecial = codeHi === 3.U
val isHighSubnormalIn = x(exp + sig - 3, sig - 1) < 2.U
val isSubnormal = code === 1.U || codeHi === 1.U && isHighSubnormalIn
val isNormal = codeHi === 1.U && !isHighSubnormalIn || codeHi === 2.U
val isZero = code === 0.U
val isInf = isSpecial && !code(0)
val isNaN = code.andR
val isSNaN = isNaN && !x(sig-2)
val isQNaN = isNaN && x(sig-2)
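// The ten class bits below are packed MSB-to-LSB as
// {qNaN, sNaN, +inf, +normal, +subnormal, +0, -0, -subnormal, -normal, -inf},
// i.e. bit 0 = negative infinity ... bit 9 = quiet NaN, which matches the
// RISC-V FCLASS result encoding (clarifying note, not in the original source).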
Cat(isQNaN, isSNaN, isInf && !sign, isNormal && !sign,
isSubnormal && !sign, isZero && !sign, isZero && sign,
isSubnormal && sign, isNormal && sign, isInf && sign)
}
// convert between formats, ignoring rounding, range, NaN
def unsafeConvert(x: UInt, to: FType) = if (this == to) x else {
val sign = x(sig + exp)
val fractIn = x(sig - 2, 0)
val expIn = x(sig + exp - 1, sig - 1)
val fractOut = fractIn << to.sig >> sig
val expOut = {
val expCode = expIn(exp, exp - 2)
val commonCase = (expIn + (1 << to.exp).U) - (1 << exp).U
Mux(expCode === 0.U || expCode >= 6.U, Cat(expCode, commonCase(to.exp - 3, 0)), commonCase(to.exp, 0))
}
Cat(sign, expOut, fractOut)
}
private def ieeeBundle = {
val expWidth = exp
class IEEEBundle extends Bundle {
val sign = Bool()
val exp = UInt(expWidth.W)
val sig = UInt((ieeeWidth-expWidth-1).W)
}
new IEEEBundle
}
def unpackIEEE(x: UInt) = x.asTypeOf(ieeeBundle)
def recode(x: UInt) = hardfloat.recFNFromFN(exp, sig, x)
def ieee(x: UInt) = hardfloat.fNFromRecFN(exp, sig, x)
}
object FType {
val H = new FType(5, 11)
val S = new FType(8, 24)
val D = new FType(11, 53)
val all = List(H, S, D)
}
trait HasFPUParameters {
require(fLen == 0 || FType.all.exists(_.ieeeWidth == fLen))
val minFLen: Int
val fLen: Int
def xLen: Int
val minXLen = 32
val nIntTypes = log2Ceil(xLen/minXLen) + 1
def floatTypes = FType.all.filter(t => minFLen <= t.ieeeWidth && t.ieeeWidth <= fLen)
def minType = floatTypes.head
def maxType = floatTypes.last
def prevType(t: FType) = floatTypes(typeTag(t) - 1)
def maxExpWidth = maxType.exp
def maxSigWidth = maxType.sig
def typeTag(t: FType) = floatTypes.indexOf(t)
def typeTagWbOffset = (FType.all.indexOf(minType) + 1).U
def typeTagGroup(t: FType) = (if (floatTypes.contains(t)) typeTag(t) else typeTag(maxType)).U
// typeTag
def H = typeTagGroup(FType.H)
def S = typeTagGroup(FType.S)
def D = typeTagGroup(FType.D)
def I = typeTag(maxType).U
private def isBox(x: UInt, t: FType): Bool = x(t.sig + t.exp, t.sig + t.exp - 4).andR
private def box(x: UInt, xt: FType, y: UInt, yt: FType): UInt = {
require(xt.ieeeWidth == 2 * yt.ieeeWidth)
val swizzledNaN = Cat(
x(xt.sig + xt.exp, xt.sig + xt.exp - 3),
x(xt.sig - 2, yt.recodedWidth - 1).andR,
x(xt.sig + xt.exp - 5, xt.sig),
y(yt.recodedWidth - 2),
x(xt.sig - 2, yt.recodedWidth - 1),
y(yt.recodedWidth - 1),
y(yt.recodedWidth - 3, 0))
Mux(xt.isNaN(x), swizzledNaN, x)
}
// implement NaN unboxing for FU inputs
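// (Clarifying note, not in the original source: a wider register value that
// does not hold a properly NaN-boxed narrower value is handed to the
// narrower consumer as a quiet NaN rather than as its raw low-order bits,
// which is the behaviour the RISC-V NaN-boxing rule requires.)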
def unbox(x: UInt, tag: UInt, exactType: Option[FType]): UInt = {
val outType = exactType.getOrElse(maxType)
def helper(x: UInt, t: FType): Seq[(Bool, UInt)] = {
val prev =
if (t == minType) {
Seq()
} else {
val prevT = prevType(t)
val unswizzled = Cat(
x(prevT.sig + prevT.exp - 1),
x(t.sig - 1),
x(prevT.sig + prevT.exp - 2, 0))
val prev = helper(unswizzled, prevT)
val isbox = isBox(x, t)
prev.map(p => (isbox && p._1, p._2))
}
prev :+ (true.B, t.unsafeConvert(x, outType))
}
val (oks, floats) = helper(x, maxType).unzip
if (exactType.isEmpty || floatTypes.size == 1) {
Mux(oks(tag), floats(tag), maxType.qNaN)
} else {
val t = exactType.get
floats(typeTag(t)) | Mux(oks(typeTag(t)), 0.U, t.qNaN)
}
}
// make sure that the redundant bits in the NaN-boxed encoding are consistent
def consistent(x: UInt): Bool = {
def helper(x: UInt, t: FType): Bool = if (typeTag(t) == 0) true.B else {
val prevT = prevType(t)
val unswizzled = Cat(
x(prevT.sig + prevT.exp - 1),
x(t.sig - 1),
x(prevT.sig + prevT.exp - 2, 0))
val prevOK = !isBox(x, t) || helper(unswizzled, prevT)
val curOK = !t.isNaN(x) || x(t.sig + t.exp - 4) === x(t.sig - 2, prevT.recodedWidth - 1).andR
prevOK && curOK
}
helper(x, maxType)
}
// generate a NaN box from an FU result
def box(x: UInt, t: FType): UInt = {
if (t == maxType) {
x
} else {
val nt = floatTypes(typeTag(t) + 1)
val bigger = box(((BigInt(1) << nt.recodedWidth)-1).U, nt, x, t)
bigger | ((BigInt(1) << maxType.recodedWidth) - (BigInt(1) << nt.recodedWidth)).U
}
}
// generate a NaN box from an FU result
def box(x: UInt, tag: UInt): UInt = {
val opts = floatTypes.map(t => box(x, t))
opts(tag)
}
// zap bits that hardfloat thinks are don't-cares, but we do care about
def sanitizeNaN(x: UInt, t: FType): UInt = {
if (typeTag(t) == 0) {
x
} else {
val maskedNaN = x & ~((BigInt(1) << (t.sig-1)) | (BigInt(1) << (t.sig+t.exp-4))).U(t.recodedWidth.W)
Mux(t.isNaN(x), maskedNaN, x)
}
}
// implement NaN boxing and recoding for FL*/fmv.*.x
def recode(x: UInt, tag: UInt): UInt = {
def helper(x: UInt, t: FType): UInt = {
if (typeTag(t) == 0) {
t.recode(x)
} else {
val prevT = prevType(t)
box(t.recode(x), t, helper(x, prevT), prevT)
}
}
// fill MSBs of subword loads to emulate a wider load of a NaN-boxed value
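// Worked example (illustrative): with fLen = 64, the 'boxes' entry for a
// 32-bit single is 2^64 - 2^32 = 0xFFFFFFFF_00000000, so a narrower value x
// is presented to the recoder as the NaN-boxed doubleword
// 0xFFFFFFFF_xxxxxxxx required by the RISC-V convention.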
val boxes = floatTypes.map(t => ((BigInt(1) << maxType.ieeeWidth) - (BigInt(1) << t.ieeeWidth)).U)
helper(boxes(tag) | x, maxType)
}
// implement NaN unboxing and un-recoding for FS*/fmv.x.*
def ieee(x: UInt, t: FType = maxType): UInt = {
if (typeTag(t) == 0) {
t.ieee(x)
} else {
val unrecoded = t.ieee(x)
val prevT = prevType(t)
val prevRecoded = Cat(
x(prevT.recodedWidth-2),
x(t.sig-1),
x(prevT.recodedWidth-3, 0))
val prevUnrecoded = ieee(prevRecoded, prevT)
Cat(unrecoded >> prevT.ieeeWidth, Mux(t.isNaN(x), prevUnrecoded, unrecoded(prevT.ieeeWidth-1, 0)))
}
}
}
abstract class FPUModule(implicit val p: Parameters) extends Module with HasCoreParameters with HasFPUParameters
class FPToInt(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
class Output extends Bundle {
val in = new FPInput
val lt = Bool()
val store = Bits(fLen.W)
val toint = Bits(xLen.W)
val exc = Bits(FPConstants.FLAGS_SZ.W)
}
val io = IO(new Bundle {
val in = Flipped(Valid(new FPInput))
val out = Valid(new Output)
})
val in = RegEnable(io.in.bits, io.in.valid)
val valid = RegNext(io.in.valid)
val dcmp = Module(new hardfloat.CompareRecFN(maxExpWidth, maxSigWidth))
dcmp.io.a := in.in1
dcmp.io.b := in.in2
dcmp.io.signaling := !in.rm(1)
val tag = in.typeTagOut
val toint_ieee = (floatTypes.map(t => if (t == FType.H) Fill(maxType.ieeeWidth / minXLen, ieee(in.in1)(15, 0).sextTo(minXLen))
else Fill(maxType.ieeeWidth / t.ieeeWidth, ieee(in.in1)(t.ieeeWidth - 1, 0))): Seq[UInt])(tag)
val toint = WireDefault(toint_ieee)
val intType = WireDefault(in.fmt(0))
io.out.bits.store := (floatTypes.map(t => Fill(fLen / t.ieeeWidth, ieee(in.in1)(t.ieeeWidth - 1, 0))): Seq[UInt])(tag)
io.out.bits.toint := ((0 until nIntTypes).map(i => toint((minXLen << i) - 1, 0).sextTo(xLen)): Seq[UInt])(intType)
io.out.bits.exc := 0.U
when (in.rm(0)) {
val classify_out = (floatTypes.map(t => t.classify(maxType.unsafeConvert(in.in1, t))): Seq[UInt])(tag)
toint := classify_out | (toint_ieee >> minXLen << minXLen)
intType := false.B
}
when (in.wflags) { // feq/flt/fle, fcvt
toint := (~in.rm & Cat(dcmp.io.lt, dcmp.io.eq)).orR | (toint_ieee >> minXLen << minXLen)
io.out.bits.exc := dcmp.io.exceptionFlags
intType := false.B
when (!in.ren2) { // fcvt
val cvtType = in.typ.extract(log2Ceil(nIntTypes), 1)
intType := cvtType
val conv = Module(new hardfloat.RecFNToIN(maxExpWidth, maxSigWidth, xLen))
conv.io.in := in.in1
conv.io.roundingMode := in.rm
conv.io.signedOut := ~in.typ(0)
toint := conv.io.out
io.out.bits.exc := Cat(conv.io.intExceptionFlags(2, 1).orR, 0.U(3.W), conv.io.intExceptionFlags(0))
for (i <- 0 until nIntTypes-1) {
val w = minXLen << i
when (cvtType === i.U) {
val narrow = Module(new hardfloat.RecFNToIN(maxExpWidth, maxSigWidth, w))
narrow.io.in := in.in1
narrow.io.roundingMode := in.rm
narrow.io.signedOut := ~in.typ(0)
val excSign = in.in1(maxExpWidth + maxSigWidth) && !maxType.isNaN(in.in1)
val excOut = Cat(conv.io.signedOut === excSign, Fill(w-1, !excSign))
val invalid = conv.io.intExceptionFlags(2) || narrow.io.intExceptionFlags(1)
when (invalid) { toint := Cat(conv.io.out >> w, excOut) }
io.out.bits.exc := Cat(invalid, 0.U(3.W), !invalid && conv.io.intExceptionFlags(0))
}
}
}
}
io.out.valid := valid
io.out.bits.lt := dcmp.io.lt || (dcmp.io.a.asSInt < 0.S && dcmp.io.b.asSInt >= 0.S)
io.out.bits.in := in
}
class IntToFP(val latency: Int)(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
val io = IO(new Bundle {
val in = Flipped(Valid(new IntToFPInput))
val out = Valid(new FPResult)
})
val in = Pipe(io.in)
val tag = in.bits.typeTagIn
val mux = Wire(new FPResult)
mux.exc := 0.U
mux.data := recode(in.bits.in1, tag)
val intValue = {
val res = WireDefault(in.bits.in1.asSInt)
for (i <- 0 until nIntTypes-1) {
val smallInt = in.bits.in1((minXLen << i) - 1, 0)
when (in.bits.typ.extract(log2Ceil(nIntTypes), 1) === i.U) {
res := Mux(in.bits.typ(0), smallInt.zext, smallInt.asSInt)
}
}
res.asUInt
}
when (in.bits.wflags) { // fcvt
// could be improved for RVD/RVQ with a single variable-position rounding
// unit, rather than N fixed-position ones
val i2fResults = for (t <- floatTypes) yield {
val i2f = Module(new hardfloat.INToRecFN(xLen, t.exp, t.sig))
i2f.io.signedIn := ~in.bits.typ(0)
i2f.io.in := intValue
i2f.io.roundingMode := in.bits.rm
i2f.io.detectTininess := hardfloat.consts.tininess_afterRounding
(sanitizeNaN(i2f.io.out, t), i2f.io.exceptionFlags)
}
val (data, exc) = i2fResults.unzip
val dataPadded = data.init.map(d => Cat(data.last >> d.getWidth, d)) :+ data.last
mux.data := dataPadded(tag)
mux.exc := exc(tag)
}
io.out <> Pipe(in.valid, mux, latency-1)
}
class FPToFP(val latency: Int)(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
val io = IO(new Bundle {
val in = Flipped(Valid(new FPInput))
val out = Valid(new FPResult)
val lt = Input(Bool()) // from FPToInt
})
val in = Pipe(io.in)
val signNum = Mux(in.bits.rm(1), in.bits.in1 ^ in.bits.in2, Mux(in.bits.rm(0), ~in.bits.in2, in.bits.in2))
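// The instruction's rm field selects the sign-injection variant here
// (clarifying note, not in the original source): rm(1) set gives FSGNJX
// (XOR of the two sign bits), rm(0) set gives FSGNJN (inverted rs2 sign),
// and rm = 0 gives plain FSGNJ.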
val fsgnj = Cat(signNum(fLen), in.bits.in1(fLen-1, 0))
val fsgnjMux = Wire(new FPResult)
fsgnjMux.exc := 0.U
fsgnjMux.data := fsgnj
when (in.bits.wflags) { // fmin/fmax
val isnan1 = maxType.isNaN(in.bits.in1)
val isnan2 = maxType.isNaN(in.bits.in2)
val isInvalid = maxType.isSNaN(in.bits.in1) || maxType.isSNaN(in.bits.in2)
val isNaNOut = isnan1 && isnan2
val isLHS = isnan2 || in.bits.rm(0) =/= io.lt && !isnan1
fsgnjMux.exc := isInvalid << 4
fsgnjMux.data := Mux(isNaNOut, maxType.qNaN, Mux(isLHS, in.bits.in1, in.bits.in2))
}
val inTag = in.bits.typeTagIn
val outTag = in.bits.typeTagOut
val mux = WireDefault(fsgnjMux)
for (t <- floatTypes.init) {
when (outTag === typeTag(t).U) {
mux.data := Cat(fsgnjMux.data >> t.recodedWidth, maxType.unsafeConvert(fsgnjMux.data, t))
}
}
when (in.bits.wflags && !in.bits.ren2) { // fcvt
if (floatTypes.size > 1) {
// widening conversions simply canonicalize NaN operands
val widened = Mux(maxType.isNaN(in.bits.in1), maxType.qNaN, in.bits.in1)
fsgnjMux.data := widened
fsgnjMux.exc := maxType.isSNaN(in.bits.in1) << 4
// narrowing conversions require rounding (for RVQ, this could be
// optimized to use a single variable-position rounding unit, rather
// than two fixed-position ones)
for (outType <- floatTypes.init) when (outTag === typeTag(outType).U && ((typeTag(outType) == 0).B || outTag < inTag)) {
val narrower = Module(new hardfloat.RecFNToRecFN(maxType.exp, maxType.sig, outType.exp, outType.sig))
narrower.io.in := in.bits.in1
narrower.io.roundingMode := in.bits.rm
narrower.io.detectTininess := hardfloat.consts.tininess_afterRounding
val narrowed = sanitizeNaN(narrower.io.out, outType)
mux.data := Cat(fsgnjMux.data >> narrowed.getWidth, narrowed)
mux.exc := narrower.io.exceptionFlags
}
}
}
io.out <> Pipe(in.valid, mux, latency-1)
}
class MulAddRecFNPipe(latency: Int, expWidth: Int, sigWidth: Int) extends Module
{
override def desiredName = s"MulAddRecFNPipe_l${latency}_e${expWidth}_s${sigWidth}"
require(latency<=2)
val io = IO(new Bundle {
val validin = Input(Bool())
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
val validout = Output(Bool())
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val mulAddRecFNToRaw_preMul = Module(new hardfloat.MulAddRecFNToRaw_preMul(expWidth, sigWidth))
val mulAddRecFNToRaw_postMul = Module(new hardfloat.MulAddRecFNToRaw_postMul(expWidth, sigWidth))
mulAddRecFNToRaw_preMul.io.op := io.op
mulAddRecFNToRaw_preMul.io.a := io.a
mulAddRecFNToRaw_preMul.io.b := io.b
mulAddRecFNToRaw_preMul.io.c := io.c
val mulAddResult =
(mulAddRecFNToRaw_preMul.io.mulAddA *
mulAddRecFNToRaw_preMul.io.mulAddB) +&
mulAddRecFNToRaw_preMul.io.mulAddC
val valid_stage0 = Wire(Bool())
val roundingMode_stage0 = Wire(UInt(3.W))
val detectTininess_stage0 = Wire(UInt(1.W))
val postmul_regs = if(latency>0) 1 else 0
mulAddRecFNToRaw_postMul.io.fromPreMul := Pipe(io.validin, mulAddRecFNToRaw_preMul.io.toPostMul, postmul_regs).bits
mulAddRecFNToRaw_postMul.io.mulAddResult := Pipe(io.validin, mulAddResult, postmul_regs).bits
mulAddRecFNToRaw_postMul.io.roundingMode := Pipe(io.validin, io.roundingMode, postmul_regs).bits
roundingMode_stage0 := Pipe(io.validin, io.roundingMode, postmul_regs).bits
detectTininess_stage0 := Pipe(io.validin, io.detectTininess, postmul_regs).bits
valid_stage0 := Pipe(io.validin, false.B, postmul_regs).valid
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundRawFNToRecFN = Module(new hardfloat.RoundRawFNToRecFN(expWidth, sigWidth, 0))
val round_regs = if(latency==2) 1 else 0
roundRawFNToRecFN.io.invalidExc := Pipe(valid_stage0, mulAddRecFNToRaw_postMul.io.invalidExc, round_regs).bits
roundRawFNToRecFN.io.in := Pipe(valid_stage0, mulAddRecFNToRaw_postMul.io.rawOut, round_regs).bits
roundRawFNToRecFN.io.roundingMode := Pipe(valid_stage0, roundingMode_stage0, round_regs).bits
roundRawFNToRecFN.io.detectTininess := Pipe(valid_stage0, detectTininess_stage0, round_regs).bits
io.validout := Pipe(valid_stage0, false.B, round_regs).valid
roundRawFNToRecFN.io.infiniteExc := false.B
io.out := roundRawFNToRecFN.io.out
io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
}
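// Illustrative sketch only, not part of the original FPU.scala: a minimal wrapper
// showing one way MulAddRecFNPipe could be driven for half-precision recoded
// operands (expWidth = 5, sigWidth = 11, hence 17-bit ports). The constant op and
// roundingMode values below are assumptions chosen purely for the example.
class MulAddRecFNPipeExample extends Module {
  val io = IO(new Bundle {
    val valid = Input(Bool())
    val a = Input(Bits(17.W))        // recoded operand, expWidth + sigWidth + 1 bits
    val b = Input(Bits(17.W))
    val c = Input(Bits(17.W))
    val out = Output(Bits(17.W))
    val outValid = Output(Bool())
  })
  // latency = 2 places one register after the multiplier array and one before rounding
  val fma = Module(new MulAddRecFNPipe(latency = 2, expWidth = 5, sigWidth = 11))
  fma.io.validin := io.valid
  fma.io.op := 0.U                   // plain fused multiply-add: (a * b) + c
  fma.io.a := io.a
  fma.io.b := io.b
  fma.io.c := io.c
  fma.io.roundingMode := 0.U         // round-to-nearest-even
  fma.io.detectTininess := hardfloat.consts.tininess_afterRounding
  io.out := fma.io.out
  io.outValid := fma.io.validout
}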
class FPUFMAPipe(val latency: Int, val t: FType)
(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
override def desiredName = s"FPUFMAPipe_l${latency}_f${t.ieeeWidth}"
require(latency>0)
val io = IO(new Bundle {
val in = Flipped(Valid(new FPInput))
val out = Valid(new FPResult)
})
val valid = RegNext(io.in.valid)
val in = Reg(new FPInput)
when (io.in.valid) {
val one = 1.U << (t.sig + t.exp - 1)
val zero = (io.in.bits.in1 ^ io.in.bits.in2) & (1.U << (t.sig + t.exp))
val cmd_fma = io.in.bits.ren3
val cmd_addsub = io.in.bits.swap23
in := io.in.bits
when (cmd_addsub) { in.in2 := one }
when (!(cmd_fma || cmd_addsub)) { in.in3 := zero }
}
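  // Stage accounting (worked example, assuming latency = 4): one input register above,
  // (latency-1) min 2 = 2 registers inside MulAddRecFNPipe, and a (latency-3) max 0 = 1
  // deep output Pipe below, giving four cycles from io.in to io.out in total.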
val fma = Module(new MulAddRecFNPipe((latency-1) min 2, t.exp, t.sig))
fma.io.validin := valid
fma.io.op := in.fmaCmd
fma.io.roundingMode := in.rm
fma.io.detectTininess := hardfloat.consts.tininess_afterRounding
fma.io.a := in.in1
fma.io.b := in.in2
fma.io.c := in.in3
val res = Wire(new FPResult)
res.data := sanitizeNaN(fma.io.out, t)
res.exc := fma.io.exceptionFlags
io.out := Pipe(fma.io.validout, res, (latency-3) max 0)
}
class FPU(cfg: FPUParams)(implicit p: Parameters) extends FPUModule()(p) {
val io = IO(new FPUIO)
val (useClockGating, useDebugROB) = coreParams match {
case r: RocketCoreParams =>
val sz = if (r.debugROB.isDefined) r.debugROB.get.size else 1
(r.clockGate, sz < 1)
case _ => (false, false)
}
val clock_en_reg = Reg(Bool())
val clock_en = clock_en_reg || io.cp_req.valid
val gated_clock =
if (!useClockGating) clock
else ClockGate(clock, clock_en, "fpu_clock_gate")
val fp_decoder = Module(new FPUDecoder)
fp_decoder.io.inst := io.inst
val id_ctrl = WireInit(fp_decoder.io.sigs)
coreParams match { case r: RocketCoreParams => r.vector.map(v => {
val v_decode = v.decoder(p) // Only need to get ren1
v_decode.io.inst := io.inst
v_decode.io.vconfig := DontCare // core deals with this
when (v_decode.io.legal && v_decode.io.read_frs1) {
id_ctrl.ren1 := true.B
id_ctrl.swap12 := false.B
id_ctrl.toint := true.B
id_ctrl.typeTagIn := I
id_ctrl.typeTagOut := Mux(io.v_sew === 3.U, D, S)
}
when (v_decode.io.write_frd) { id_ctrl.wen := true.B }
})}
val ex_reg_valid = RegNext(io.valid, false.B)
val ex_reg_inst = RegEnable(io.inst, io.valid)
val ex_reg_ctrl = RegEnable(id_ctrl, io.valid)
val ex_ra = List.fill(3)(Reg(UInt()))
// load/vector response
val load_wb = RegNext(io.ll_resp_val)
val load_wb_typeTag = RegEnable(io.ll_resp_type(1,0) - typeTagWbOffset, io.ll_resp_val)
val load_wb_data = RegEnable(io.ll_resp_data, io.ll_resp_val)
val load_wb_tag = RegEnable(io.ll_resp_tag, io.ll_resp_val)
class FPUImpl { // entering gated-clock domain
val req_valid = ex_reg_valid || io.cp_req.valid
val ex_cp_valid = io.cp_req.fire
val mem_cp_valid = RegNext(ex_cp_valid, false.B)
val wb_cp_valid = RegNext(mem_cp_valid, false.B)
val mem_reg_valid = RegInit(false.B)
val killm = (io.killm || io.nack_mem) && !mem_cp_valid
// Kill X-stage instruction if M-stage is killed. This prevents it from
// speculatively being sent to the div-sqrt unit, which can cause priority
// inversion for two back-to-back divides, the first of which is killed.
val killx = io.killx || mem_reg_valid && killm
mem_reg_valid := ex_reg_valid && !killx || ex_cp_valid
val mem_reg_inst = RegEnable(ex_reg_inst, ex_reg_valid)
val wb_reg_valid = RegNext(mem_reg_valid && (!killm || mem_cp_valid), false.B)
val cp_ctrl = Wire(new FPUCtrlSigs)
cp_ctrl :<>= io.cp_req.bits.viewAsSupertype(new FPUCtrlSigs)
io.cp_resp.valid := false.B
io.cp_resp.bits.data := 0.U
io.cp_resp.bits.exc := DontCare
val ex_ctrl = Mux(ex_cp_valid, cp_ctrl, ex_reg_ctrl)
val mem_ctrl = RegEnable(ex_ctrl, req_valid)
val wb_ctrl = RegEnable(mem_ctrl, mem_reg_valid)
// CoreMonitorBundle to monitor fp register file writes
val frfWriteBundle = Seq.fill(2)(WireInit(new CoreMonitorBundle(xLen, fLen), DontCare))
frfWriteBundle.foreach { i =>
i.clock := clock
i.reset := reset
i.hartid := io.hartid
i.timer := io.time(31,0)
i.valid := false.B
i.wrenx := false.B
i.wrenf := false.B
i.excpt := false.B
}
// regfile
val regfile = Mem(32, Bits((fLen+1).W))
when (load_wb) {
val wdata = recode(load_wb_data, load_wb_typeTag)
regfile(load_wb_tag) := wdata
assert(consistent(wdata))
if (enableCommitLog)
printf("f%d p%d 0x%x\n", load_wb_tag, load_wb_tag + 32.U, ieee(wdata))
if (useDebugROB)
DebugROB.pushWb(clock, reset, io.hartid, load_wb, load_wb_tag + 32.U, ieee(wdata))
frfWriteBundle(0).wrdst := load_wb_tag
frfWriteBundle(0).wrenf := true.B
frfWriteBundle(0).wrdata := ieee(wdata)
}
val ex_rs = ex_ra.map(a => regfile(a))
when (io.valid) {
when (id_ctrl.ren1) {
when (!id_ctrl.swap12) { ex_ra(0) := io.inst(19,15) }
when (id_ctrl.swap12) { ex_ra(1) := io.inst(19,15) }
}
when (id_ctrl.ren2) {
when (id_ctrl.swap12) { ex_ra(0) := io.inst(24,20) }
when (id_ctrl.swap23) { ex_ra(2) := io.inst(24,20) }
when (!id_ctrl.swap12 && !id_ctrl.swap23) { ex_ra(1) := io.inst(24,20) }
}
when (id_ctrl.ren3) { ex_ra(2) := io.inst(31,27) }
}
val ex_rm = Mux(ex_reg_inst(14,12) === 7.U, io.fcsr_rm, ex_reg_inst(14,12))
def fuInput(minT: Option[FType]): FPInput = {
val req = Wire(new FPInput)
val tag = ex_ctrl.typeTagIn
req.viewAsSupertype(new Bundle with HasFPUCtrlSigs) :#= ex_ctrl.viewAsSupertype(new Bundle with HasFPUCtrlSigs)
req.rm := ex_rm
req.in1 := unbox(ex_rs(0), tag, minT)
req.in2 := unbox(ex_rs(1), tag, minT)
req.in3 := unbox(ex_rs(2), tag, minT)
req.typ := ex_reg_inst(21,20)
req.fmt := ex_reg_inst(26,25)
req.fmaCmd := ex_reg_inst(3,2) | (!ex_ctrl.ren3 && ex_reg_inst(27))
when (ex_cp_valid) {
req := io.cp_req.bits
when (io.cp_req.bits.swap12) {
req.in1 := io.cp_req.bits.in2
req.in2 := io.cp_req.bits.in1
}
when (io.cp_req.bits.swap23) {
req.in2 := io.cp_req.bits.in3
req.in3 := io.cp_req.bits.in2
}
}
req
}
val sfma = Module(new FPUFMAPipe(cfg.sfmaLatency, FType.S))
sfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === S
sfma.io.in.bits := fuInput(Some(sfma.t))
val fpiu = Module(new FPToInt)
fpiu.io.in.valid := req_valid && (ex_ctrl.toint || ex_ctrl.div || ex_ctrl.sqrt || (ex_ctrl.fastpipe && ex_ctrl.wflags))
fpiu.io.in.bits := fuInput(None)
io.store_data := fpiu.io.out.bits.store
io.toint_data := fpiu.io.out.bits.toint
when(fpiu.io.out.valid && mem_cp_valid && mem_ctrl.toint){
io.cp_resp.bits.data := fpiu.io.out.bits.toint
io.cp_resp.valid := true.B
}
val ifpu = Module(new IntToFP(cfg.ifpuLatency))
ifpu.io.in.valid := req_valid && ex_ctrl.fromint
ifpu.io.in.bits := fpiu.io.in.bits
ifpu.io.in.bits.in1 := Mux(ex_cp_valid, io.cp_req.bits.in1, io.fromint_data)
val fpmu = Module(new FPToFP(cfg.fpmuLatency))
fpmu.io.in.valid := req_valid && ex_ctrl.fastpipe
fpmu.io.in.bits := fpiu.io.in.bits
fpmu.io.lt := fpiu.io.out.bits.lt
val divSqrt_wen = WireDefault(false.B)
val divSqrt_inFlight = WireDefault(false.B)
val divSqrt_waddr = Reg(UInt(5.W))
val divSqrt_cp = Reg(Bool())
val divSqrt_typeTag = Wire(UInt(log2Up(floatTypes.size).W))
val divSqrt_wdata = Wire(UInt((fLen+1).W))
val divSqrt_flags = Wire(UInt(FPConstants.FLAGS_SZ.W))
divSqrt_typeTag := DontCare
divSqrt_wdata := DontCare
divSqrt_flags := DontCare
// writeback arbitration
case class Pipe(p: Module, lat: Int, cond: (FPUCtrlSigs) => Bool, res: FPResult)
val pipes = List(
Pipe(fpmu, fpmu.latency, (c: FPUCtrlSigs) => c.fastpipe, fpmu.io.out.bits),
Pipe(ifpu, ifpu.latency, (c: FPUCtrlSigs) => c.fromint, ifpu.io.out.bits),
Pipe(sfma, sfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === S, sfma.io.out.bits)) ++
(fLen > 32).option({
val dfma = Module(new FPUFMAPipe(cfg.dfmaLatency, FType.D))
dfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === D
dfma.io.in.bits := fuInput(Some(dfma.t))
Pipe(dfma, dfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === D, dfma.io.out.bits)
}) ++
(minFLen == 16).option({
val hfma = Module(new FPUFMAPipe(cfg.sfmaLatency, FType.H))
hfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === H
hfma.io.in.bits := fuInput(Some(hfma.t))
Pipe(hfma, hfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === H, hfma.io.out.bits)
})
def latencyMask(c: FPUCtrlSigs, offset: Int) = {
require(pipes.forall(_.lat >= offset))
pipes.map(p => Mux(p.cond(c), (1 << p.lat-offset).U, 0.U)).reduce(_|_)
}
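    // Worked example with a hypothetical 4-cycle sfma: an FMA selected by c contributes
    // latencyMask(c, 2) = 1 << (4-2) = 0b100; ORed into wen at the end of MEM and shifted
    // right each cycle, bit 0 goes high exactly when the 4-cycle result is ready to write
    // back. The offset argument rebases the count to the stage doing the query.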
def pipeid(c: FPUCtrlSigs) = pipes.zipWithIndex.map(p => Mux(p._1.cond(c), p._2.U, 0.U)).reduce(_|_)
val maxLatency = pipes.map(_.lat).max
val memLatencyMask = latencyMask(mem_ctrl, 2)
class WBInfo extends Bundle {
val rd = UInt(5.W)
val typeTag = UInt(log2Up(floatTypes.size).W)
val cp = Bool()
val pipeid = UInt(log2Ceil(pipes.size).W)
}
val wen = RegInit(0.U((maxLatency-1).W))
val wbInfo = Reg(Vec(maxLatency-1, new WBInfo))
val mem_wen = mem_reg_valid && (mem_ctrl.fma || mem_ctrl.fastpipe || mem_ctrl.fromint)
val write_port_busy = RegEnable(mem_wen && (memLatencyMask & latencyMask(ex_ctrl, 1)).orR || (wen & latencyMask(ex_ctrl, 0)).orR, req_valid)
ccover(mem_reg_valid && write_port_busy, "WB_STRUCTURAL", "structural hazard on writeback")
for (i <- 0 until maxLatency-2) {
when (wen(i+1)) { wbInfo(i) := wbInfo(i+1) }
}
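    // wen is a countdown scoreboard of pending writebacks: MEM ORs in the selected pipe's
    // latency mask, the vector shifts right one position per cycle, and the cycle in which
    // bit 0 is set is the cycle wbInfo(0) writes the register file.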
wen := wen >> 1
when (mem_wen) {
when (!killm) {
wen := wen >> 1 | memLatencyMask
}
for (i <- 0 until maxLatency-1) {
when (!write_port_busy && memLatencyMask(i)) {
wbInfo(i).cp := mem_cp_valid
wbInfo(i).typeTag := mem_ctrl.typeTagOut
wbInfo(i).pipeid := pipeid(mem_ctrl)
wbInfo(i).rd := mem_reg_inst(11,7)
}
}
}
val waddr = Mux(divSqrt_wen, divSqrt_waddr, wbInfo(0).rd)
val wb_cp = Mux(divSqrt_wen, divSqrt_cp, wbInfo(0).cp)
val wtypeTag = Mux(divSqrt_wen, divSqrt_typeTag, wbInfo(0).typeTag)
val wdata = box(Mux(divSqrt_wen, divSqrt_wdata, (pipes.map(_.res.data): Seq[UInt])(wbInfo(0).pipeid)), wtypeTag)
val wexc = (pipes.map(_.res.exc): Seq[UInt])(wbInfo(0).pipeid)
when ((!wbInfo(0).cp && wen(0)) || divSqrt_wen) {
assert(consistent(wdata))
regfile(waddr) := wdata
if (enableCommitLog) {
printf("f%d p%d 0x%x\n", waddr, waddr + 32.U, ieee(wdata))
}
frfWriteBundle(1).wrdst := waddr
frfWriteBundle(1).wrenf := true.B
frfWriteBundle(1).wrdata := ieee(wdata)
}
if (useDebugROB) {
DebugROB.pushWb(clock, reset, io.hartid, (!wbInfo(0).cp && wen(0)) || divSqrt_wen, waddr + 32.U, ieee(wdata))
}
when (wb_cp && (wen(0) || divSqrt_wen)) {
io.cp_resp.bits.data := wdata
io.cp_resp.valid := true.B
}
assert(!io.cp_req.valid || pipes.forall(_.lat == pipes.head.lat).B,
s"FPU only supports coprocessor if FMA pipes have uniform latency ${pipes.map(_.lat)}")
// Avoid structural hazards and nacking of external requests
// toint responds in the MEM stage, so an incoming toint can induce a structural hazard against inflight FMAs
io.cp_req.ready := !ex_reg_valid && !(cp_ctrl.toint && wen =/= 0.U) && !divSqrt_inFlight
val wb_toint_valid = wb_reg_valid && wb_ctrl.toint
val wb_toint_exc = RegEnable(fpiu.io.out.bits.exc, mem_ctrl.toint)
io.fcsr_flags.valid := wb_toint_valid || divSqrt_wen || wen(0)
io.fcsr_flags.bits :=
Mux(wb_toint_valid, wb_toint_exc, 0.U) |
Mux(divSqrt_wen, divSqrt_flags, 0.U) |
Mux(wen(0), wexc, 0.U)
val divSqrt_write_port_busy = (mem_ctrl.div || mem_ctrl.sqrt) && wen.orR
io.fcsr_rdy := !(ex_reg_valid && ex_ctrl.wflags || mem_reg_valid && mem_ctrl.wflags || wb_reg_valid && wb_ctrl.toint || wen.orR || divSqrt_inFlight)
io.nack_mem := (write_port_busy || divSqrt_write_port_busy || divSqrt_inFlight) && !mem_cp_valid
io.dec <> id_ctrl
def useScoreboard(f: ((Pipe, Int)) => Bool) = pipes.zipWithIndex.filter(_._1.lat > 3).map(x => f(x)).fold(false.B)(_||_)
io.sboard_set := wb_reg_valid && !wb_cp_valid && RegNext(useScoreboard(_._1.cond(mem_ctrl)) || mem_ctrl.div || mem_ctrl.sqrt || mem_ctrl.vec)
io.sboard_clr := !wb_cp_valid && (divSqrt_wen || (wen(0) && useScoreboard(x => wbInfo(0).pipeid === x._2.U)))
io.sboard_clra := waddr
ccover(io.sboard_clr && load_wb, "DUAL_WRITEBACK", "load and FMA writeback on same cycle")
// we don't currently support round-max-magnitude (rm=4)
io.illegal_rm := io.inst(14,12).isOneOf(5.U, 6.U) || io.inst(14,12) === 7.U && io.fcsr_rm >= 5.U
if (cfg.divSqrt) {
val divSqrt_inValid = mem_reg_valid && (mem_ctrl.div || mem_ctrl.sqrt) && !divSqrt_inFlight
val divSqrt_killed = RegNext(divSqrt_inValid && killm, true.B)
when (divSqrt_inValid) {
divSqrt_waddr := mem_reg_inst(11,7)
divSqrt_cp := mem_cp_valid
}
ccover(divSqrt_inFlight && divSqrt_killed, "DIV_KILLED", "divide killed after issued to divider")
ccover(divSqrt_inFlight && mem_reg_valid && (mem_ctrl.div || mem_ctrl.sqrt), "DIV_BUSY", "divider structural hazard")
ccover(mem_reg_valid && divSqrt_write_port_busy, "DIV_WB_STRUCTURAL", "structural hazard on division writeback")
for (t <- floatTypes) {
val tag = mem_ctrl.typeTagOut
val divSqrt = withReset(divSqrt_killed) { Module(new hardfloat.DivSqrtRecFN_small(t.exp, t.sig, 0)) }
divSqrt.io.inValid := divSqrt_inValid && tag === typeTag(t).U
divSqrt.io.sqrtOp := mem_ctrl.sqrt
divSqrt.io.a := maxType.unsafeConvert(fpiu.io.out.bits.in.in1, t)
divSqrt.io.b := maxType.unsafeConvert(fpiu.io.out.bits.in.in2, t)
divSqrt.io.roundingMode := fpiu.io.out.bits.in.rm
divSqrt.io.detectTininess := hardfloat.consts.tininess_afterRounding
when (!divSqrt.io.inReady) { divSqrt_inFlight := true.B } // only 1 in flight
when (divSqrt.io.outValid_div || divSqrt.io.outValid_sqrt) {
divSqrt_wen := !divSqrt_killed
divSqrt_wdata := sanitizeNaN(divSqrt.io.out, t)
divSqrt_flags := divSqrt.io.exceptionFlags
divSqrt_typeTag := typeTag(t).U
}
}
when (divSqrt_killed) { divSqrt_inFlight := false.B }
} else {
when (id_ctrl.div || id_ctrl.sqrt) { io.illegal_rm := true.B }
}
// gate the clock
clock_en_reg := !useClockGating.B ||
io.keep_clock_enabled || // chicken bit
io.valid || // ID stage
req_valid || // EX stage
mem_reg_valid || mem_cp_valid || // MEM stage
wb_reg_valid || wb_cp_valid || // WB stage
wen.orR || divSqrt_inFlight || // post-WB stage
io.ll_resp_val // load writeback
} // leaving gated-clock domain
val fpuImpl = withClock (gated_clock) { new FPUImpl }
def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
property.cover(cond, s"FPU_$label", "Core;;" + desc)
}
| module MulAddRecFNPipe_l2_e5_s11_2( // @[FPU.scala:633:7]
input clock, // @[FPU.scala:633:7]
input reset, // @[FPU.scala:633:7]
input io_validin, // @[FPU.scala:638:16]
input [1:0] io_op, // @[FPU.scala:638:16]
input [16:0] io_a, // @[FPU.scala:638:16]
input [16:0] io_b, // @[FPU.scala:638:16]
input [16:0] io_c, // @[FPU.scala:638:16]
input [2:0] io_roundingMode, // @[FPU.scala:638:16]
output [16:0] io_out, // @[FPU.scala:638:16]
output [4:0] io_exceptionFlags, // @[FPU.scala:638:16]
output io_validout // @[FPU.scala:638:16]
);
wire _mulAddRecFNToRaw_postMul_io_invalidExc; // @[FPU.scala:655:42]
wire _mulAddRecFNToRaw_postMul_io_rawOut_isNaN; // @[FPU.scala:655:42]
wire _mulAddRecFNToRaw_postMul_io_rawOut_isInf; // @[FPU.scala:655:42]
wire _mulAddRecFNToRaw_postMul_io_rawOut_isZero; // @[FPU.scala:655:42]
wire _mulAddRecFNToRaw_postMul_io_rawOut_sign; // @[FPU.scala:655:42]
wire [6:0] _mulAddRecFNToRaw_postMul_io_rawOut_sExp; // @[FPU.scala:655:42]
wire [13:0] _mulAddRecFNToRaw_postMul_io_rawOut_sig; // @[FPU.scala:655:42]
wire [10:0] _mulAddRecFNToRaw_preMul_io_mulAddA; // @[FPU.scala:654:41]
wire [10:0] _mulAddRecFNToRaw_preMul_io_mulAddB; // @[FPU.scala:654:41]
wire [21:0] _mulAddRecFNToRaw_preMul_io_mulAddC; // @[FPU.scala:654:41]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isSigNaNAny; // @[FPU.scala:654:41]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isNaNAOrB; // @[FPU.scala:654:41]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isInfA; // @[FPU.scala:654:41]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isZeroA; // @[FPU.scala:654:41]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isInfB; // @[FPU.scala:654:41]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isZeroB; // @[FPU.scala:654:41]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_signProd; // @[FPU.scala:654:41]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isNaNC; // @[FPU.scala:654:41]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isInfC; // @[FPU.scala:654:41]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isZeroC; // @[FPU.scala:654:41]
wire [6:0] _mulAddRecFNToRaw_preMul_io_toPostMul_sExpSum; // @[FPU.scala:654:41]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_doSubMags; // @[FPU.scala:654:41]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_CIsDominant; // @[FPU.scala:654:41]
wire [3:0] _mulAddRecFNToRaw_preMul_io_toPostMul_CDom_CAlignDist; // @[FPU.scala:654:41]
wire [12:0] _mulAddRecFNToRaw_preMul_io_toPostMul_highAlignedSigC; // @[FPU.scala:654:41]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_bit0AlignedSigC; // @[FPU.scala:654:41]
wire io_validin_0 = io_validin; // @[FPU.scala:633:7]
wire [1:0] io_op_0 = io_op; // @[FPU.scala:633:7]
wire [16:0] io_a_0 = io_a; // @[FPU.scala:633:7]
wire [16:0] io_b_0 = io_b; // @[FPU.scala:633:7]
wire [16:0] io_c_0 = io_c; // @[FPU.scala:633:7]
wire [2:0] io_roundingMode_0 = io_roundingMode; // @[FPU.scala:633:7]
wire io_detectTininess = 1'h1; // @[FPU.scala:633:7]
wire detectTininess_stage0 = 1'h1; // @[FPU.scala:669:37]
wire detectTininess_stage0_pipe_out_bits = 1'h1; // @[Valid.scala:135:21]
wire valid_stage0_pipe_out_bits = 1'h0; // @[Valid.scala:135:21]
wire io_validout_pipe_out_bits = 1'h0; // @[Valid.scala:135:21]
wire io_validout_pipe_out_valid; // @[Valid.scala:135:21]
wire [16:0] io_out_0; // @[FPU.scala:633:7]
wire [4:0] io_exceptionFlags_0; // @[FPU.scala:633:7]
wire io_validout_0; // @[FPU.scala:633:7]
wire [21:0] _mulAddResult_T = {11'h0, _mulAddRecFNToRaw_preMul_io_mulAddA} * {11'h0, _mulAddRecFNToRaw_preMul_io_mulAddB}; // @[FPU.scala:654:41, :663:45]
wire [22:0] mulAddResult = {1'h0, _mulAddResult_T} + {1'h0, _mulAddRecFNToRaw_preMul_io_mulAddC}; // @[FPU.scala:654:41, :663:45, :664:50]
wire valid_stage0_pipe_out_valid; // @[Valid.scala:135:21]
wire valid_stage0; // @[FPU.scala:667:28]
wire [2:0] roundingMode_stage0_pipe_out_bits; // @[Valid.scala:135:21]
wire [2:0] roundingMode_stage0; // @[FPU.scala:668:35]
reg mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_v; // @[Valid.scala:141:24]
wire mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_valid = mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_v; // @[Valid.scala:135:21, :141:24]
reg mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isSigNaNAny; // @[Valid.scala:142:26]
wire mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_isSigNaNAny = mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isSigNaNAny; // @[Valid.scala:135:21, :142:26]
reg mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isNaNAOrB; // @[Valid.scala:142:26]
wire mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_isNaNAOrB = mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isNaNAOrB; // @[Valid.scala:135:21, :142:26]
reg mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isInfA; // @[Valid.scala:142:26]
wire mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_isInfA = mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isInfA; // @[Valid.scala:135:21, :142:26]
reg mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isZeroA; // @[Valid.scala:142:26]
wire mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_isZeroA = mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isZeroA; // @[Valid.scala:135:21, :142:26]
reg mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isInfB; // @[Valid.scala:142:26]
wire mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_isInfB = mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isInfB; // @[Valid.scala:135:21, :142:26]
reg mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isZeroB; // @[Valid.scala:142:26]
wire mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_isZeroB = mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isZeroB; // @[Valid.scala:135:21, :142:26]
reg mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_signProd; // @[Valid.scala:142:26]
wire mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_signProd = mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_signProd; // @[Valid.scala:135:21, :142:26]
reg mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isNaNC; // @[Valid.scala:142:26]
wire mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_isNaNC = mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isNaNC; // @[Valid.scala:135:21, :142:26]
reg mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isInfC; // @[Valid.scala:142:26]
wire mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_isInfC = mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isInfC; // @[Valid.scala:135:21, :142:26]
reg mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isZeroC; // @[Valid.scala:142:26]
wire mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_isZeroC = mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isZeroC; // @[Valid.scala:135:21, :142:26]
reg [6:0] mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_sExpSum; // @[Valid.scala:142:26]
wire [6:0] mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_sExpSum = mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_sExpSum; // @[Valid.scala:135:21, :142:26]
reg mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_doSubMags; // @[Valid.scala:142:26]
wire mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_doSubMags = mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_doSubMags; // @[Valid.scala:135:21, :142:26]
reg mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_CIsDominant; // @[Valid.scala:142:26]
wire mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_CIsDominant = mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_CIsDominant; // @[Valid.scala:135:21, :142:26]
reg [3:0] mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_CDom_CAlignDist; // @[Valid.scala:142:26]
wire [3:0] mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_CDom_CAlignDist = mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_CDom_CAlignDist; // @[Valid.scala:135:21, :142:26]
reg [12:0] mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_highAlignedSigC; // @[Valid.scala:142:26]
wire [12:0] mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_highAlignedSigC = mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_highAlignedSigC; // @[Valid.scala:135:21, :142:26]
reg mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_bit0AlignedSigC; // @[Valid.scala:142:26]
wire mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_bit0AlignedSigC = mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_bit0AlignedSigC; // @[Valid.scala:135:21, :142:26]
reg mulAddRecFNToRaw_postMul_io_mulAddResult_pipe_v; // @[Valid.scala:141:24]
wire mulAddRecFNToRaw_postMul_io_mulAddResult_pipe_out_valid = mulAddRecFNToRaw_postMul_io_mulAddResult_pipe_v; // @[Valid.scala:135:21, :141:24]
reg [22:0] mulAddRecFNToRaw_postMul_io_mulAddResult_pipe_b; // @[Valid.scala:142:26]
wire [22:0] mulAddRecFNToRaw_postMul_io_mulAddResult_pipe_out_bits = mulAddRecFNToRaw_postMul_io_mulAddResult_pipe_b; // @[Valid.scala:135:21, :142:26]
reg mulAddRecFNToRaw_postMul_io_roundingMode_pipe_v; // @[Valid.scala:141:24]
wire mulAddRecFNToRaw_postMul_io_roundingMode_pipe_out_valid = mulAddRecFNToRaw_postMul_io_roundingMode_pipe_v; // @[Valid.scala:135:21, :141:24]
reg [2:0] mulAddRecFNToRaw_postMul_io_roundingMode_pipe_b; // @[Valid.scala:142:26]
wire [2:0] mulAddRecFNToRaw_postMul_io_roundingMode_pipe_out_bits = mulAddRecFNToRaw_postMul_io_roundingMode_pipe_b; // @[Valid.scala:135:21, :142:26]
reg roundingMode_stage0_pipe_v; // @[Valid.scala:141:24]
wire roundingMode_stage0_pipe_out_valid = roundingMode_stage0_pipe_v; // @[Valid.scala:135:21, :141:24]
reg [2:0] roundingMode_stage0_pipe_b; // @[Valid.scala:142:26]
assign roundingMode_stage0_pipe_out_bits = roundingMode_stage0_pipe_b; // @[Valid.scala:135:21, :142:26]
assign roundingMode_stage0 = roundingMode_stage0_pipe_out_bits; // @[Valid.scala:135:21]
reg detectTininess_stage0_pipe_v; // @[Valid.scala:141:24]
wire detectTininess_stage0_pipe_out_valid = detectTininess_stage0_pipe_v; // @[Valid.scala:135:21, :141:24]
reg valid_stage0_pipe_v; // @[Valid.scala:141:24]
assign valid_stage0_pipe_out_valid = valid_stage0_pipe_v; // @[Valid.scala:135:21, :141:24]
assign valid_stage0 = valid_stage0_pipe_out_valid; // @[Valid.scala:135:21]
reg roundRawFNToRecFN_io_invalidExc_pipe_v; // @[Valid.scala:141:24]
wire roundRawFNToRecFN_io_invalidExc_pipe_out_valid = roundRawFNToRecFN_io_invalidExc_pipe_v; // @[Valid.scala:135:21, :141:24]
reg roundRawFNToRecFN_io_invalidExc_pipe_b; // @[Valid.scala:142:26]
wire roundRawFNToRecFN_io_invalidExc_pipe_out_bits = roundRawFNToRecFN_io_invalidExc_pipe_b; // @[Valid.scala:135:21, :142:26]
reg roundRawFNToRecFN_io_in_pipe_v; // @[Valid.scala:141:24]
wire roundRawFNToRecFN_io_in_pipe_out_valid = roundRawFNToRecFN_io_in_pipe_v; // @[Valid.scala:135:21, :141:24]
reg roundRawFNToRecFN_io_in_pipe_b_isNaN; // @[Valid.scala:142:26]
wire roundRawFNToRecFN_io_in_pipe_out_bits_isNaN = roundRawFNToRecFN_io_in_pipe_b_isNaN; // @[Valid.scala:135:21, :142:26]
reg roundRawFNToRecFN_io_in_pipe_b_isInf; // @[Valid.scala:142:26]
wire roundRawFNToRecFN_io_in_pipe_out_bits_isInf = roundRawFNToRecFN_io_in_pipe_b_isInf; // @[Valid.scala:135:21, :142:26]
reg roundRawFNToRecFN_io_in_pipe_b_isZero; // @[Valid.scala:142:26]
wire roundRawFNToRecFN_io_in_pipe_out_bits_isZero = roundRawFNToRecFN_io_in_pipe_b_isZero; // @[Valid.scala:135:21, :142:26]
reg roundRawFNToRecFN_io_in_pipe_b_sign; // @[Valid.scala:142:26]
wire roundRawFNToRecFN_io_in_pipe_out_bits_sign = roundRawFNToRecFN_io_in_pipe_b_sign; // @[Valid.scala:135:21, :142:26]
reg [6:0] roundRawFNToRecFN_io_in_pipe_b_sExp; // @[Valid.scala:142:26]
wire [6:0] roundRawFNToRecFN_io_in_pipe_out_bits_sExp = roundRawFNToRecFN_io_in_pipe_b_sExp; // @[Valid.scala:135:21, :142:26]
reg [13:0] roundRawFNToRecFN_io_in_pipe_b_sig; // @[Valid.scala:142:26]
wire [13:0] roundRawFNToRecFN_io_in_pipe_out_bits_sig = roundRawFNToRecFN_io_in_pipe_b_sig; // @[Valid.scala:135:21, :142:26]
reg roundRawFNToRecFN_io_roundingMode_pipe_v; // @[Valid.scala:141:24]
wire roundRawFNToRecFN_io_roundingMode_pipe_out_valid = roundRawFNToRecFN_io_roundingMode_pipe_v; // @[Valid.scala:135:21, :141:24]
reg [2:0] roundRawFNToRecFN_io_roundingMode_pipe_b; // @[Valid.scala:142:26]
wire [2:0] roundRawFNToRecFN_io_roundingMode_pipe_out_bits = roundRawFNToRecFN_io_roundingMode_pipe_b; // @[Valid.scala:135:21, :142:26]
reg roundRawFNToRecFN_io_detectTininess_pipe_v; // @[Valid.scala:141:24]
wire roundRawFNToRecFN_io_detectTininess_pipe_out_valid = roundRawFNToRecFN_io_detectTininess_pipe_v; // @[Valid.scala:135:21, :141:24]
reg roundRawFNToRecFN_io_detectTininess_pipe_b; // @[Valid.scala:142:26]
wire roundRawFNToRecFN_io_detectTininess_pipe_out_bits = roundRawFNToRecFN_io_detectTininess_pipe_b; // @[Valid.scala:135:21, :142:26]
reg io_validout_pipe_v; // @[Valid.scala:141:24]
assign io_validout_pipe_out_valid = io_validout_pipe_v; // @[Valid.scala:135:21, :141:24]
assign io_validout_0 = io_validout_pipe_out_valid; // @[Valid.scala:135:21]
always @(posedge clock) begin // @[FPU.scala:633:7]
if (reset) begin // @[FPU.scala:633:7]
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_v <= 1'h0; // @[Valid.scala:141:24]
mulAddRecFNToRaw_postMul_io_mulAddResult_pipe_v <= 1'h0; // @[Valid.scala:141:24]
mulAddRecFNToRaw_postMul_io_roundingMode_pipe_v <= 1'h0; // @[Valid.scala:141:24]
roundingMode_stage0_pipe_v <= 1'h0; // @[Valid.scala:141:24]
detectTininess_stage0_pipe_v <= 1'h0; // @[Valid.scala:141:24]
valid_stage0_pipe_v <= 1'h0; // @[Valid.scala:141:24]
roundRawFNToRecFN_io_invalidExc_pipe_v <= 1'h0; // @[Valid.scala:141:24]
roundRawFNToRecFN_io_in_pipe_v <= 1'h0; // @[Valid.scala:141:24]
roundRawFNToRecFN_io_roundingMode_pipe_v <= 1'h0; // @[Valid.scala:141:24]
roundRawFNToRecFN_io_detectTininess_pipe_v <= 1'h0; // @[Valid.scala:141:24]
io_validout_pipe_v <= 1'h0; // @[Valid.scala:141:24]
end
else begin // @[FPU.scala:633:7]
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_v <= io_validin_0; // @[Valid.scala:141:24]
mulAddRecFNToRaw_postMul_io_mulAddResult_pipe_v <= io_validin_0; // @[Valid.scala:141:24]
mulAddRecFNToRaw_postMul_io_roundingMode_pipe_v <= io_validin_0; // @[Valid.scala:141:24]
roundingMode_stage0_pipe_v <= io_validin_0; // @[Valid.scala:141:24]
detectTininess_stage0_pipe_v <= io_validin_0; // @[Valid.scala:141:24]
valid_stage0_pipe_v <= io_validin_0; // @[Valid.scala:141:24]
roundRawFNToRecFN_io_invalidExc_pipe_v <= valid_stage0; // @[Valid.scala:141:24]
roundRawFNToRecFN_io_in_pipe_v <= valid_stage0; // @[Valid.scala:141:24]
roundRawFNToRecFN_io_roundingMode_pipe_v <= valid_stage0; // @[Valid.scala:141:24]
roundRawFNToRecFN_io_detectTininess_pipe_v <= valid_stage0; // @[Valid.scala:141:24]
io_validout_pipe_v <= valid_stage0; // @[Valid.scala:141:24]
end
if (io_validin_0) begin // @[FPU.scala:633:7]
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isSigNaNAny <= _mulAddRecFNToRaw_preMul_io_toPostMul_isSigNaNAny; // @[Valid.scala:142:26]
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isNaNAOrB <= _mulAddRecFNToRaw_preMul_io_toPostMul_isNaNAOrB; // @[Valid.scala:142:26]
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isInfA <= _mulAddRecFNToRaw_preMul_io_toPostMul_isInfA; // @[Valid.scala:142:26]
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isZeroA <= _mulAddRecFNToRaw_preMul_io_toPostMul_isZeroA; // @[Valid.scala:142:26]
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isInfB <= _mulAddRecFNToRaw_preMul_io_toPostMul_isInfB; // @[Valid.scala:142:26]
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isZeroB <= _mulAddRecFNToRaw_preMul_io_toPostMul_isZeroB; // @[Valid.scala:142:26]
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_signProd <= _mulAddRecFNToRaw_preMul_io_toPostMul_signProd; // @[Valid.scala:142:26]
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isNaNC <= _mulAddRecFNToRaw_preMul_io_toPostMul_isNaNC; // @[Valid.scala:142:26]
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isInfC <= _mulAddRecFNToRaw_preMul_io_toPostMul_isInfC; // @[Valid.scala:142:26]
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isZeroC <= _mulAddRecFNToRaw_preMul_io_toPostMul_isZeroC; // @[Valid.scala:142:26]
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_sExpSum <= _mulAddRecFNToRaw_preMul_io_toPostMul_sExpSum; // @[Valid.scala:142:26]
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_doSubMags <= _mulAddRecFNToRaw_preMul_io_toPostMul_doSubMags; // @[Valid.scala:142:26]
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_CIsDominant <= _mulAddRecFNToRaw_preMul_io_toPostMul_CIsDominant; // @[Valid.scala:142:26]
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_CDom_CAlignDist <= _mulAddRecFNToRaw_preMul_io_toPostMul_CDom_CAlignDist; // @[Valid.scala:142:26]
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_highAlignedSigC <= _mulAddRecFNToRaw_preMul_io_toPostMul_highAlignedSigC; // @[Valid.scala:142:26]
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_bit0AlignedSigC <= _mulAddRecFNToRaw_preMul_io_toPostMul_bit0AlignedSigC; // @[Valid.scala:142:26]
mulAddRecFNToRaw_postMul_io_mulAddResult_pipe_b <= mulAddResult; // @[Valid.scala:142:26]
mulAddRecFNToRaw_postMul_io_roundingMode_pipe_b <= io_roundingMode_0; // @[Valid.scala:142:26]
roundingMode_stage0_pipe_b <= io_roundingMode_0; // @[Valid.scala:142:26]
end
if (valid_stage0) begin // @[FPU.scala:667:28]
roundRawFNToRecFN_io_invalidExc_pipe_b <= _mulAddRecFNToRaw_postMul_io_invalidExc; // @[Valid.scala:142:26]
roundRawFNToRecFN_io_in_pipe_b_isNaN <= _mulAddRecFNToRaw_postMul_io_rawOut_isNaN; // @[Valid.scala:142:26]
roundRawFNToRecFN_io_in_pipe_b_isInf <= _mulAddRecFNToRaw_postMul_io_rawOut_isInf; // @[Valid.scala:142:26]
roundRawFNToRecFN_io_in_pipe_b_isZero <= _mulAddRecFNToRaw_postMul_io_rawOut_isZero; // @[Valid.scala:142:26]
roundRawFNToRecFN_io_in_pipe_b_sign <= _mulAddRecFNToRaw_postMul_io_rawOut_sign; // @[Valid.scala:142:26]
roundRawFNToRecFN_io_in_pipe_b_sExp <= _mulAddRecFNToRaw_postMul_io_rawOut_sExp; // @[Valid.scala:142:26]
roundRawFNToRecFN_io_in_pipe_b_sig <= _mulAddRecFNToRaw_postMul_io_rawOut_sig; // @[Valid.scala:142:26]
roundRawFNToRecFN_io_roundingMode_pipe_b <= roundingMode_stage0; // @[Valid.scala:142:26]
end
roundRawFNToRecFN_io_detectTininess_pipe_b <= valid_stage0 | roundRawFNToRecFN_io_detectTininess_pipe_b; // @[Valid.scala:142:26]
  end // @[FPU.scala:633:7]
MulAddRecFNToRaw_preMul_e5_s11_2 mulAddRecFNToRaw_preMul ( // @[FPU.scala:654:41]
.io_op (io_op_0), // @[FPU.scala:633:7]
.io_a (io_a_0), // @[FPU.scala:633:7]
.io_b (io_b_0), // @[FPU.scala:633:7]
.io_c (io_c_0), // @[FPU.scala:633:7]
.io_mulAddA (_mulAddRecFNToRaw_preMul_io_mulAddA),
.io_mulAddB (_mulAddRecFNToRaw_preMul_io_mulAddB),
.io_mulAddC (_mulAddRecFNToRaw_preMul_io_mulAddC),
.io_toPostMul_isSigNaNAny (_mulAddRecFNToRaw_preMul_io_toPostMul_isSigNaNAny),
.io_toPostMul_isNaNAOrB (_mulAddRecFNToRaw_preMul_io_toPostMul_isNaNAOrB),
.io_toPostMul_isInfA (_mulAddRecFNToRaw_preMul_io_toPostMul_isInfA),
.io_toPostMul_isZeroA (_mulAddRecFNToRaw_preMul_io_toPostMul_isZeroA),
.io_toPostMul_isInfB (_mulAddRecFNToRaw_preMul_io_toPostMul_isInfB),
.io_toPostMul_isZeroB (_mulAddRecFNToRaw_preMul_io_toPostMul_isZeroB),
.io_toPostMul_signProd (_mulAddRecFNToRaw_preMul_io_toPostMul_signProd),
.io_toPostMul_isNaNC (_mulAddRecFNToRaw_preMul_io_toPostMul_isNaNC),
.io_toPostMul_isInfC (_mulAddRecFNToRaw_preMul_io_toPostMul_isInfC),
.io_toPostMul_isZeroC (_mulAddRecFNToRaw_preMul_io_toPostMul_isZeroC),
.io_toPostMul_sExpSum (_mulAddRecFNToRaw_preMul_io_toPostMul_sExpSum),
.io_toPostMul_doSubMags (_mulAddRecFNToRaw_preMul_io_toPostMul_doSubMags),
.io_toPostMul_CIsDominant (_mulAddRecFNToRaw_preMul_io_toPostMul_CIsDominant),
.io_toPostMul_CDom_CAlignDist (_mulAddRecFNToRaw_preMul_io_toPostMul_CDom_CAlignDist),
.io_toPostMul_highAlignedSigC (_mulAddRecFNToRaw_preMul_io_toPostMul_highAlignedSigC),
.io_toPostMul_bit0AlignedSigC (_mulAddRecFNToRaw_preMul_io_toPostMul_bit0AlignedSigC)
); // @[FPU.scala:654:41]
MulAddRecFNToRaw_postMul_e5_s11_2 mulAddRecFNToRaw_postMul ( // @[FPU.scala:655:42]
.io_fromPreMul_isSigNaNAny (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_isSigNaNAny), // @[Valid.scala:135:21]
.io_fromPreMul_isNaNAOrB (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_isNaNAOrB), // @[Valid.scala:135:21]
.io_fromPreMul_isInfA (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_isInfA), // @[Valid.scala:135:21]
.io_fromPreMul_isZeroA (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_isZeroA), // @[Valid.scala:135:21]
.io_fromPreMul_isInfB (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_isInfB), // @[Valid.scala:135:21]
.io_fromPreMul_isZeroB (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_isZeroB), // @[Valid.scala:135:21]
.io_fromPreMul_signProd (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_signProd), // @[Valid.scala:135:21]
.io_fromPreMul_isNaNC (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_isNaNC), // @[Valid.scala:135:21]
.io_fromPreMul_isInfC (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_isInfC), // @[Valid.scala:135:21]
.io_fromPreMul_isZeroC (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_isZeroC), // @[Valid.scala:135:21]
.io_fromPreMul_sExpSum (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_sExpSum), // @[Valid.scala:135:21]
.io_fromPreMul_doSubMags (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_doSubMags), // @[Valid.scala:135:21]
.io_fromPreMul_CIsDominant (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_CIsDominant), // @[Valid.scala:135:21]
.io_fromPreMul_CDom_CAlignDist (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_CDom_CAlignDist), // @[Valid.scala:135:21]
.io_fromPreMul_highAlignedSigC (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_highAlignedSigC), // @[Valid.scala:135:21]
.io_fromPreMul_bit0AlignedSigC (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_out_bits_bit0AlignedSigC), // @[Valid.scala:135:21]
.io_mulAddResult (mulAddRecFNToRaw_postMul_io_mulAddResult_pipe_out_bits), // @[Valid.scala:135:21]
.io_roundingMode (mulAddRecFNToRaw_postMul_io_roundingMode_pipe_out_bits), // @[Valid.scala:135:21]
.io_invalidExc (_mulAddRecFNToRaw_postMul_io_invalidExc),
.io_rawOut_isNaN (_mulAddRecFNToRaw_postMul_io_rawOut_isNaN),
.io_rawOut_isInf (_mulAddRecFNToRaw_postMul_io_rawOut_isInf),
.io_rawOut_isZero (_mulAddRecFNToRaw_postMul_io_rawOut_isZero),
.io_rawOut_sign (_mulAddRecFNToRaw_postMul_io_rawOut_sign),
.io_rawOut_sExp (_mulAddRecFNToRaw_postMul_io_rawOut_sExp),
.io_rawOut_sig (_mulAddRecFNToRaw_postMul_io_rawOut_sig)
); // @[FPU.scala:655:42]
RoundRawFNToRecFN_e5_s11_4 roundRawFNToRecFN ( // @[FPU.scala:682:35]
.io_invalidExc (roundRawFNToRecFN_io_invalidExc_pipe_out_bits), // @[Valid.scala:135:21]
.io_in_isNaN (roundRawFNToRecFN_io_in_pipe_out_bits_isNaN), // @[Valid.scala:135:21]
.io_in_isInf (roundRawFNToRecFN_io_in_pipe_out_bits_isInf), // @[Valid.scala:135:21]
.io_in_isZero (roundRawFNToRecFN_io_in_pipe_out_bits_isZero), // @[Valid.scala:135:21]
.io_in_sign (roundRawFNToRecFN_io_in_pipe_out_bits_sign), // @[Valid.scala:135:21]
.io_in_sExp (roundRawFNToRecFN_io_in_pipe_out_bits_sExp), // @[Valid.scala:135:21]
.io_in_sig (roundRawFNToRecFN_io_in_pipe_out_bits_sig), // @[Valid.scala:135:21]
.io_roundingMode (roundRawFNToRecFN_io_roundingMode_pipe_out_bits), // @[Valid.scala:135:21]
.io_detectTininess (roundRawFNToRecFN_io_detectTininess_pipe_out_bits), // @[Valid.scala:135:21]
.io_out (io_out_0),
.io_exceptionFlags (io_exceptionFlags_0)
); // @[FPU.scala:682:35]
assign io_out = io_out_0; // @[FPU.scala:633:7]
assign io_exceptionFlags = io_exceptionFlags_0; // @[FPU.scala:633:7]
assign io_validout = io_validout_0; // @[FPU.scala:633:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
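// Illustrative sketch only, not part of the original file: a three-deep, zero-initialized
// delay line on an 8-bit signal, with the registers name-hinted stage_0 through stage_2.
class ShiftRegInitExample extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(8.W))
    val q = Output(UInt(8.W))
  })
  io.q := ShiftRegInit(io.d, n = 3, init = 0.U(8.W), name = Some("stage"))
}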
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
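// Illustrative sketch only, not part of the original file: bringing an asynchronous
// single-bit input into the local clock domain through a 3-deep synchronizer chain,
// similar in spirit to the generated AsyncResetSynchronizerShiftReg module shown further below.
class AsyncResetSynchronizerExample extends Module {
  val io = IO(new Bundle {
    val async_in = Input(Bool())
    val sync_out = Output(Bool())
  })
  io.sync_out := AsyncResetSynchronizerShiftReg(io.async_in, 3, Some("sync_chain"))
}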
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unnecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
| module AsyncResetSynchronizerShiftReg_w1_d3_i0_140( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_248 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_1), // @[SynchronizerReg.scala:87:41]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
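  // Illustrative reading with hypothetical parameters: if a client's visibility is only
  // AddressSet(0x80000000L, 0xffff), a request it sources outside that window makes
  // visible() false, and the per-channel checks below report the address as illegal.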
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID for which a response message " +
"is already pending (not received until current cycle) for a prior request message " +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the " +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the " +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be the same as a_size of the corresponding request " +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding " +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
// This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
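// A worked sketch of the packing scheme used below (values are illustrative and
// assume edge.bundle.sizeBits == 4, so a_size_bus_size == 5 and log_a_size_bus_size == 3):
//   - an accepted request of size 6 from source 2 stores (6 << 1) | 1 = 0xD into the
//     field of inflight_sizes reserved for source 2 (shift amount 2 << 3 == 16)
//   - when the matching 'D' beat arrives, the lookup reads that field back and drops
//     the low "present" bit with >> 1, recovering size 6
//   - an all-zero field therefore unambiguously means "no request in flight" for that
//     source, which is why one extra bit is added above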
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial block is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
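// Usage sketch (illustrative only; `max_cycles` is not defined in this file):
//   val max_cycles = PlusArg("max_cycles", default = 0, docstring = "Stop after this many cycles (0 = never)")
// Running the simulation with +max_cycles=1000 makes the returned UInt read 1000.U;
// without the plusarg it falls back to the default of 0.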
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
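// Usage sketch (illustrative only): tie a free-running counter to a plusarg-controlled limit.
//   val cycles = RegInit(0.U(32.W))
//   cycles := cycles + 1.U
//   PlusArg.timeout("max_idle_cycles", docstring = "Abort if stalled this long")(cycles)
// With +max_idle_cycles=0 (the default) the assertion can never fire.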
}
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
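// Illustration (hypothetical element names): for x = Seq(a, b, c, d),
//   x.rotate(1)      == Seq(b, c, d, a)   // rotate towards lower indices
//   x.rotateRight(1) == Seq(d, a, b, c)
// The UInt-shift variants build a log2-depth mux tree, hence the isPow2 requirement.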
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
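// Examples (illustrative bit patterns): for a 4-bit x = "b1010".U,
//   x.sextTo(8) == "b1111_1010".U   // replicate the sign bit
//   x.padTo(8)  == "b0000_1010".U   // zero-extend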
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
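// Examples (illustrative): with n = 8,
//   5.U.addWrap(6.U, 8) == 3.U   // (5 + 6) % 8
//   2.U.subWrap(5.U, 8) == 5.U   // (2 - 5) mod 8
// The power-of-two case reduces to simple truncation; otherwise a conditional correction by n is applied.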
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
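// OH1 is a "trailing ones" encoding of a count. Illustrative 8-bit values:
//   UIntToOH1(3.U, 8)        == "b0000_0111".U   // three low bits set
//   OH1ToOH("b0000_0111".U)  == "b0000_1000".U
//   OH1ToUInt("b0000_0111".U) == 3.U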
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
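// Illustration (5-bit values): for x = "b00100".U,
//   leftOR(x)  == "b11100".U   // every bit at or above a set bit becomes 1
//   rightOR(x) == "b00111".U   // every bit at or below a set bit becomes 1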
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
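// Example (illustrative): groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2)
// returns Seq(1 -> Seq(1, 3), 0 -> Seq(2, 4)), with keys ordered by first appearance,
// unlike Seq.groupBy whose Map ordering is unspecified.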
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
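// Example (illustrative): with maxLgSize >= 3, isAligned(0x1008.U, 3.U) is true
// (the low three address bits are zero), while isAligned(0x100A.U, 3.U) is false.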
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
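  // Worked example (illustrative comment, not part of the original source): with
  // beatBytes = 8 and a data message of size = 5 (a 32-byte burst),
  // UIntToOH1(5, maxLgSize) = 0x1f and 0x1f >> log2Ceil(8) = 3, so numBeats1 = 3
  // (four beats in total) and numBeats reports 4. A message without data always
  // reports numBeats1 = 0, i.e. a single beat.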
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
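  // Worked example (illustrative): for the 4-beat burst above (beats1 = 3), the
  // counter walks 0 -> 3 -> 2 -> 1 as beats fire, so `first` is asserted on the
  // first beat (counter === 0), `last` and `done` on the final beat
  // (counter === 1), and `count` advances 0, 1, 2, 3 across the beats.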
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
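  // Summary (descriptive comment, not in the original source): Puts and AMOs
  // always need T permissions, Gets never do, Hints need T only for
  // PREFETCH_WRITE, and Acquires need T only when growing to NtoT or BtoT
  // (NtoB does not).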
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
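  // Typical usage sketch (illustrative; `edge`, `tl`, `addr`, and `wantRead` are
  // placeholder names, not defined in this file): drive channel A from the
  // returned bundle and gate valid on the legality check.
  //   val (legal, get) = edge.Get(fromSource = 0.U, toAddress = addr, lgSize = 3.U)
  //   tl.a.valid := wantRead && legal
  //   tl.a.bits  := get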
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLMonitor_57( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [4:0] io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35]
reg [8:0] a_first_counter; // @[Edges.scala:229:27]
reg [31:0] address; // @[Monitor.scala:391:22]
reg [8:0] d_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [3:0] size_1; // @[Monitor.scala:540:22]
reg [4:0] sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
reg [1:0] inflight; // @[Monitor.scala:614:27]
reg [3:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [7:0] inflight_sizes; // @[Monitor.scala:618:33]
reg [8:0] a_first_counter_1; // @[Edges.scala:229:27]
wire a_first_1 = a_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
reg [8:0] d_first_counter_1; // @[Edges.scala:229:27]
wire d_first_1 = d_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire a_set = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35]
wire _GEN = io_in_d_valid & d_first_1; // @[Monitor.scala:674:26]
wire _GEN_0 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:36:7, :673:46, :674:74]
wire d_clr = _GEN & _GEN_0; // @[Monitor.scala:673:46, :674:{26,71,74}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
reg [1:0] inflight_1; // @[Monitor.scala:726:35]
reg [7:0] inflight_sizes_1; // @[Monitor.scala:728:35]
reg [8:0] d_first_counter_2; // @[Edges.scala:229:27]
wire d_first_2 = d_first_counter_2 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire d_clr_1 = io_in_d_valid & d_first_2 & io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:36:7, :673:46, :784:26, :788:70]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File util.scala:
//******************************************************************************
// Copyright (c) 2015 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Utility Functions
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v4.util
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util.{Str}
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.tile.{TileKey}
import boom.v4.common.{MicroOp}
import boom.v4.exu.{BrUpdateInfo}
/**
* Object to XOR fold a input register of fullLength into a compressedLength.
*/
object Fold
{
def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = {
val clen = compressedLength
val hlen = fullLength
if (hlen <= clen) {
input
} else {
var res = 0.U(clen.W)
var remaining = input.asUInt
for (i <- 0 to hlen-1 by clen) {
val len = if (i + clen > hlen ) (hlen - i) else clen
require(len > 0)
res = res(clen-1,0) ^ remaining(len-1,0)
remaining = remaining >> len.U
}
res
}
}
}
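// Worked example (illustrative comment, not part of the original source):
// Fold("b1010100101".U(10.W), 4, 10) XORs the 4-bit slices of the input,
// i.e. 0x5 ^ 0xA ^ 0x2 (the two leftover high bits) = 0xD.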
/**
* Object to check if MicroOp was killed due to a branch mispredict.
* Uses "Fast" branch masks
*/
object IsKilledByBranch
{
def apply(brupdate: BrUpdateInfo, flush: Bool, uop: MicroOp): Bool = {
return apply(brupdate, flush, uop.br_mask)
}
def apply(brupdate: BrUpdateInfo, flush: Bool, uop_mask: UInt): Bool = {
return maskMatch(brupdate.b1.mispredict_mask, uop_mask) || flush
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: T): Bool = {
return apply(brupdate, flush, bundle.uop)
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: Valid[T]): Bool = {
return apply(brupdate, flush, bundle.bits)
}
}
/**
* Object to return new MicroOp with a new BR mask given a MicroOp mask
* and old BR mask.
*/
object GetNewUopAndBrMask
{
def apply(uop: MicroOp, brupdate: BrUpdateInfo)
(implicit p: Parameters): MicroOp = {
val newuop = WireInit(uop)
newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask
newuop
}
}
/**
* Object to return a BR mask given a MicroOp mask and old BR mask.
*/
object GetNewBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = {
return uop.br_mask & ~brupdate.b1.resolve_mask
}
def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = {
return br_mask & ~brupdate.b1.resolve_mask
}
}
object UpdateBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = {
val out = WireInit(uop)
out.br_mask := GetNewBrMask(brupdate, uop)
out
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = {
val out = WireInit(bundle)
out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask)
out
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: Valid[T]): Valid[T] = {
val out = WireInit(bundle)
out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask)
out.valid := bundle.valid && !IsKilledByBranch(brupdate, flush, bundle.bits.uop.br_mask)
out
}
}
/**
* Object to check if at least 1 bit matches in two masks
*/
object maskMatch
{
def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U
}
/**
* Object to clear one bit in a mask given an index
*/
object clearMaskBit
{
def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0)
}
/**
* Object to shift a register over by one bit and concat a new one
*/
object PerformShiftRegister
{
def apply(reg_val: UInt, new_bit: Bool): UInt = {
reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt
reg_val
}
}
/**
* Object to shift a register over by one bit, wrapping the top bit around to the bottom
* (XOR'ed with a new-bit), and evicting a bit at index HLEN.
* This is used to simulate a longer HLEN-width shift register that is folded
* down to a compressed CLEN.
*/
object PerformCircularShiftRegister
{
def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = {
val carry = csr(clen-1)
val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U)
newval
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapAdd
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, amt: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + amt)(log2Ceil(n)-1,0)
} else {
val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt)
Mux(sum >= n.U,
sum - n.U,
sum)
}
}
}
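// Worked example (illustrative): WrapAdd(4.U, 3.U, 6) = 1.U, since 4 + 3 = 7
// wraps past n = 6; for a power-of-two n the sum is simply truncated, e.g.
// WrapAdd(6.U, 3.U, 8) = 1.U. Both value and amt are assumed to be below n.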
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapSub
{
// "n" is the number of increments, so we wrap to n-1.
def apply(value: UInt, amt: Int, n: Int): UInt = {
if (isPow2(n)) {
(value - amt.U)(log2Ceil(n)-1,0)
} else {
val v = Cat(0.U(1.W), value)
val b = Cat(0.U(1.W), amt.U)
Mux(value >= amt.U,
value - amt.U,
n.U - amt.U + value)
}
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapInc
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === (n-1).U)
Mux(wrap, 0.U, value + 1.U)
}
}
}
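// Worked example (illustrative): WrapInc(2.U, 5) = 3.U and WrapInc(4.U, 5) = 0.U.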
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapDec
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value - 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === 0.U)
Mux(wrap, (n-1).U, value - 1.U)
}
}
}
/**
* Object to mask off lower bits of a PC to align to a "b"
* Byte boundary.
*/
object AlignPCToBoundary
{
def apply(pc: UInt, b: Int): UInt = {
    // Invert twice so that a pc wider than b is handled correctly
    // (a plain AND with ~(b-1).U would clear all bits above the width of b).
~(~pc | (b-1).U)
}
}
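// Worked example (illustrative): AlignPCToBoundary(0x1234.U, 64) clears the low
// log2(64) = 6 bits, giving 0x1200; the double inversion keeps the upper PC bits
// even though (b-1).U is narrower than the PC.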
/**
* Object to rotate a signal left by one
*/
object RotateL1
{
def apply(signal: UInt): UInt = {
val w = signal.getWidth
val out = Cat(signal(w-2,0), signal(w-1))
return out
}
}
/**
* Object to sext a value to a particular length.
*/
object Sext
{
def apply(x: UInt, length: Int): UInt = {
if (x.getWidth == length) return x
else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x)
}
}
/**
* Object to translate from BOOM's special "packed immediate" to a 32b signed immediate
* Asking for U-type gives it shifted up 12 bits.
*/
object ImmGen
{
import boom.v4.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U, IS_N}
def apply(i: UInt, isel: UInt): UInt = {
val ip = Mux(isel === IS_N, 0.U(LONGEST_IMM_SZ.W), i)
val sign = ip(LONGEST_IMM_SZ-1).asSInt
val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign)
val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign)
val i11 = Mux(isel === IS_U, 0.S,
Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign))
val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt)
val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt)
val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S)
return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0)
}
}
/**
* Object to see if an instruction is a JALR.
*/
object DebugIsJALR
{
def apply(inst: UInt): Bool = {
// TODO Chisel not sure why this won't compile
// val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)),
// Array(
// JALR -> Bool(true)))
inst(6,0) === "b1100111".U
}
}
/**
* Object to take an instruction and output its branch or jal target. Only used
* for a debug assert (no where else would we jump straight from instruction
* bits to a target).
*/
object DebugGetBJImm
{
def apply(inst: UInt): UInt = {
// TODO Chisel not sure why this won't compile
//val csignals =
//rocket.DecodeLogic(inst,
// List(Bool(false), Bool(false)),
// Array(
// BEQ -> List(Bool(true ), Bool(false)),
// BNE -> List(Bool(true ), Bool(false)),
// BGE -> List(Bool(true ), Bool(false)),
// BGEU -> List(Bool(true ), Bool(false)),
// BLT -> List(Bool(true ), Bool(false)),
// BLTU -> List(Bool(true ), Bool(false))
// ))
//val is_br :: nothing :: Nil = csignals
val is_br = (inst(6,0) === "b1100011".U)
val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W))
val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W))
Mux(is_br, br_targ, jal_targ)
}
}
/**
* Object to return the lowest bit position after the head.
*/
object AgePriorityEncoder
{
def apply(in: Seq[Bool], head: UInt): UInt = {
val n = in.size
val width = log2Ceil(in.size)
val n_padded = 1 << width
val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in
val idx = PriorityEncoder(temp_vec)
idx(width-1, 0) //discard msb
}
}
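// Worked example (illustrative): with in = Seq(false.B, true.B, false.B, true.B)
// and head = 2.U, the result is 3.U (the first valid entry at or after head);
// with only entry 1 valid and head = 2.U, the search wraps and returns 1.U.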
/**
* Object to determine whether queue
* index i0 is older than index i1.
*/
object IsOlder
{
def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head))
}
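// Worked example (illustrative): with head = 5.U in an 8-entry queue,
// IsOlder(6.U, 1.U, 5.U) is true: entry 6 was allocated before the enqueue
// pointer wrapped, while entry 1 was allocated after the wrap.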
object IsYoungerMask
{
def apply(i: UInt, head: UInt, n: Integer): UInt = {
val hi_mask = ~MaskLower(UIntToOH(i)(n-1,0))
val lo_mask = ~MaskUpper(UIntToOH(head)(n-1,0))
Mux(i < head, hi_mask & lo_mask, hi_mask | lo_mask)(n-1,0)
}
}
/**
* Set all bits at or below the highest order '1'.
*/
object MaskLower
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => in >> i.U).reduce(_|_)
}
}
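// Worked example (illustrative): MaskLower("b0100".U) = "b0111".U.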
/**
* Set all bits at or above the lowest order '1'.
*/
object MaskUpper
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_)
}
}
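// Worked example (illustrative): MaskUpper("b0010".U) = "b1110".U.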
/**
* Transpose a matrix of Chisel Vecs.
*/
object Transpose
{
def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = {
val n = in(0).size
VecInit((0 until n).map(i => VecInit(in.map(row => row(i)))))
}
}
/**
* N-wide one-hot priority encoder.
*/
object SelectFirstN
{
def apply(in: UInt, n: Int) = {
val sels = Wire(Vec(n, UInt(in.getWidth.W)))
var mask = in
for (i <- 0 until n) {
sels(i) := PriorityEncoderOH(mask)
mask = mask & ~sels(i)
}
sels
}
}
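// Worked example (illustrative): SelectFirstN("b1011".U, 2) returns
// sels(0) = "b0001".U and sels(1) = "b0010".U, picking the two lowest set bits.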
/**
* Connect the first k of n valid input interfaces to k output interfaces.
*/
class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module
{
require(n >= k)
val io = IO(new Bundle {
val in = Vec(n, Flipped(DecoupledIO(gen)))
val out = Vec(k, DecoupledIO(gen))
})
if (n == k) {
io.out <> io.in
} else {
val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c))
val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col =>
(col zip io.in.map(_.valid)) map {case (c,v) => c && v})
val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_))
val out_valids = sels map (col => col.reduce(_||_))
val out_data = sels map (s => Mux1H(s, io.in.map(_.bits)))
in_readys zip io.in foreach {case (r,i) => i.ready := r}
out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d}
}
}
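// Worked example (illustrative): a Compactor with n = 4 and k = 2 whose inputs
// 1 and 3 are valid routes input 1 to output 0 and input 3 to output 1.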
/**
* Create a queue that can be killed with a branch kill signal.
* Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq).
*/
class BranchKillableQueue[T <: boom.v4.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v4.common.MicroOp => Bool = u => true.B, fastDeq: Boolean = false)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v4.common.BoomModule()(p)
with boom.v4.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val enq = Flipped(Decoupled(gen))
val deq = Decoupled(gen)
val brupdate = Input(new BrUpdateInfo())
val flush = Input(Bool())
val empty = Output(Bool())
val count = Output(UInt(log2Ceil(entries).W))
})
if (fastDeq && entries > 1) {
// Pipeline dequeue selection so the mux gets an entire cycle
val main = Module(new BranchKillableQueue(gen, entries-1, flush_fn, false))
val out_reg = Reg(gen)
val out_valid = RegInit(false.B)
val out_uop = Reg(new MicroOp)
main.io.enq <> io.enq
main.io.brupdate := io.brupdate
main.io.flush := io.flush
io.empty := main.io.empty && !out_valid
io.count := main.io.count + out_valid
io.deq.valid := out_valid
io.deq.bits := out_reg
io.deq.bits.uop := out_uop
out_uop := UpdateBrMask(io.brupdate, out_uop)
out_valid := out_valid && !IsKilledByBranch(io.brupdate, false.B, out_uop) && !(io.flush && flush_fn(out_uop))
main.io.deq.ready := false.B
when (io.deq.fire || !out_valid) {
out_valid := main.io.deq.valid && !IsKilledByBranch(io.brupdate, false.B, main.io.deq.bits.uop) && !(io.flush && flush_fn(main.io.deq.bits.uop))
out_reg := main.io.deq.bits
out_uop := UpdateBrMask(io.brupdate, main.io.deq.bits.uop)
main.io.deq.ready := true.B
}
} else {
val ram = Mem(entries, gen)
val valids = RegInit(VecInit(Seq.fill(entries) {false.B}))
val uops = Reg(Vec(entries, new MicroOp))
val enq_ptr = Counter(entries)
val deq_ptr = Counter(entries)
val maybe_full = RegInit(false.B)
val ptr_match = enq_ptr.value === deq_ptr.value
io.empty := ptr_match && !maybe_full
val full = ptr_match && maybe_full
val do_enq = WireInit(io.enq.fire && !IsKilledByBranch(io.brupdate, false.B, io.enq.bits.uop) && !(io.flush && flush_fn(io.enq.bits.uop)))
val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty)
for (i <- 0 until entries) {
val mask = uops(i).br_mask
val uop = uops(i)
valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, false.B, mask) && !(io.flush && flush_fn(uop))
when (valids(i)) {
uops(i).br_mask := GetNewBrMask(io.brupdate, mask)
}
}
when (do_enq) {
ram(enq_ptr.value) := io.enq.bits
valids(enq_ptr.value) := true.B
uops(enq_ptr.value) := io.enq.bits.uop
uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
enq_ptr.inc()
}
when (do_deq) {
valids(deq_ptr.value) := false.B
deq_ptr.inc()
}
when (do_enq =/= do_deq) {
maybe_full := do_enq
}
io.enq.ready := !full
val out = Wire(gen)
out := ram(deq_ptr.value)
out.uop := uops(deq_ptr.value)
io.deq.valid := !io.empty && valids(deq_ptr.value)
io.deq.bits := out
val ptr_diff = enq_ptr.value - deq_ptr.value
if (isPow2(entries)) {
io.count := Cat(maybe_full && ptr_match, ptr_diff)
}
else {
io.count := Mux(ptr_match,
Mux(maybe_full,
entries.asUInt, 0.U),
Mux(deq_ptr.value > enq_ptr.value,
entries.asUInt + ptr_diff, ptr_diff))
}
}
}
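// Illustrative usage sketch (names are placeholders, not defined in this file;
// the `gen` bundle must mix in HasBoomUOP):
//   val q = Module(new BranchKillableQueue(new ExampleBundle, entries = 8))
//   q.io.enq      <> producer
//   q.io.brupdate := io.brupdate
//   q.io.flush    := io.flush
//   consumer      <> q.io.deq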
// ------------------------------------------
// Printf helper functions
// ------------------------------------------
object BoolToChar
{
/**
* Take in a Chisel Bool and convert it into a Str
* based on the Chars given
*
* @param c_bool Chisel Bool
* @param trueChar Scala Char if bool is true
* @param falseChar Scala Char if bool is false
* @return UInt ASCII Char for "trueChar" or "falseChar"
*/
def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = {
Mux(c_bool, Str(trueChar), Str(falseChar))
}
}
object CfiTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param cfi_type specific cfi type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(cfi_type: UInt) = {
val strings = Seq("----", "BR ", "JAL ", "JALR")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(cfi_type)
}
}
object BpdTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param bpd_type specific bpd type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(bpd_type: UInt) = {
val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(bpd_type)
}
}
object RobTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param rob_type specific rob type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(rob_type: UInt) = {
val strings = Seq("RST", "NML", "RBK", " WT")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(rob_type)
}
}
object XRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param xreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(xreg: UInt) = {
val strings = Seq(" x0", " ra", " sp", " gp",
" tp", " t0", " t1", " t2",
" s0", " s1", " a0", " a1",
" a2", " a3", " a4", " a5",
" a6", " a7", " s2", " s3",
" s4", " s5", " s6", " s7",
" s8", " s9", "s10", "s11",
" t3", " t4", " t5", " t6")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(xreg)
}
}
object FPRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param fpreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(fpreg: UInt) = {
val strings = Seq(" ft0", " ft1", " ft2", " ft3",
" ft4", " ft5", " ft6", " ft7",
" fs0", " fs1", " fa0", " fa1",
" fa2", " fa3", " fa4", " fa5",
" fa6", " fa7", " fs2", " fs3",
" fs4", " fs5", " fs6", " fs7",
" fs8", " fs9", "fs10", "fs11",
" ft8", " ft9", "ft10", "ft11")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(fpreg)
}
}
object BoomCoreStringPrefix
{
/**
* Add prefix to BOOM strings (currently only adds the hartId)
*
* @param strs list of strings
* @return String combining the list with the prefix per line
*/
def apply(strs: String*)(implicit p: Parameters) = {
val prefix = "[C" + s"${p(TileKey).tileId}" + "] "
strs.map(str => prefix + str + "\n").mkString("")
}
}
class BranchKillablePipeline[T <: boom.v4.common.HasBoomUOP](gen: T, stages: Int)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v4.common.BoomModule()(p)
with boom.v4.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val req = Input(Valid(gen))
val flush = Input(Bool())
val brupdate = Input(new BrUpdateInfo)
val resp = Output(Vec(stages, Valid(gen)))
})
require(stages > 0)
val uops = Reg(Vec(stages, Valid(gen)))
uops(0).valid := io.req.valid && !IsKilledByBranch(io.brupdate, io.flush, io.req.bits)
uops(0).bits := UpdateBrMask(io.brupdate, io.req.bits)
for (i <- 1 until stages) {
uops(i).valid := uops(i-1).valid && !IsKilledByBranch(io.brupdate, io.flush, uops(i-1).bits)
uops(i).bits := UpdateBrMask(io.brupdate, uops(i-1).bits)
}
for (i <- 0 until stages) { when (reset.asBool) { uops(i).valid := false.B } }
io.resp := uops
}
| module BranchKillableQueue_24( // @[util.scala:458:7]
input clock, // @[util.scala:458:7]
input reset, // @[util.scala:458:7]
output io_enq_ready, // @[util.scala:463:14]
input io_enq_valid, // @[util.scala:463:14]
input [31:0] io_enq_bits_uop_inst, // @[util.scala:463:14]
input [31:0] io_enq_bits_uop_debug_inst, // @[util.scala:463:14]
input io_enq_bits_uop_is_rvc, // @[util.scala:463:14]
input [33:0] io_enq_bits_uop_debug_pc, // @[util.scala:463:14]
input io_enq_bits_uop_iq_type_0, // @[util.scala:463:14]
input io_enq_bits_uop_iq_type_1, // @[util.scala:463:14]
input io_enq_bits_uop_iq_type_2, // @[util.scala:463:14]
input io_enq_bits_uop_iq_type_3, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_0, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_1, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_2, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_3, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_4, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_5, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_6, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_7, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_8, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_9, // @[util.scala:463:14]
input io_enq_bits_uop_iw_issued, // @[util.scala:463:14]
input io_enq_bits_uop_iw_issued_partial_agen, // @[util.scala:463:14]
input io_enq_bits_uop_iw_issued_partial_dgen, // @[util.scala:463:14]
input io_enq_bits_uop_iw_p1_speculative_child, // @[util.scala:463:14]
input io_enq_bits_uop_iw_p2_speculative_child, // @[util.scala:463:14]
input io_enq_bits_uop_iw_p1_bypass_hint, // @[util.scala:463:14]
input io_enq_bits_uop_iw_p2_bypass_hint, // @[util.scala:463:14]
input io_enq_bits_uop_iw_p3_bypass_hint, // @[util.scala:463:14]
input io_enq_bits_uop_dis_col_sel, // @[util.scala:463:14]
input [3:0] io_enq_bits_uop_br_mask, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_br_tag, // @[util.scala:463:14]
input [3:0] io_enq_bits_uop_br_type, // @[util.scala:463:14]
input io_enq_bits_uop_is_sfb, // @[util.scala:463:14]
input io_enq_bits_uop_is_fence, // @[util.scala:463:14]
input io_enq_bits_uop_is_fencei, // @[util.scala:463:14]
input io_enq_bits_uop_is_sfence, // @[util.scala:463:14]
input io_enq_bits_uop_is_amo, // @[util.scala:463:14]
input io_enq_bits_uop_is_eret, // @[util.scala:463:14]
input io_enq_bits_uop_is_sys_pc2epc, // @[util.scala:463:14]
input io_enq_bits_uop_is_rocc, // @[util.scala:463:14]
input io_enq_bits_uop_is_mov, // @[util.scala:463:14]
input [3:0] io_enq_bits_uop_ftq_idx, // @[util.scala:463:14]
input io_enq_bits_uop_edge_inst, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_pc_lob, // @[util.scala:463:14]
input io_enq_bits_uop_taken, // @[util.scala:463:14]
input io_enq_bits_uop_imm_rename, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_imm_sel, // @[util.scala:463:14]
input [4:0] io_enq_bits_uop_pimm, // @[util.scala:463:14]
input [19:0] io_enq_bits_uop_imm_packed, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_op1_sel, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_op2_sel, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_ldst, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_wen, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_ren1, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_ren2, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_ren3, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_swap12, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_swap23, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_fp_ctrl_typeTagIn, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_fp_ctrl_typeTagOut, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_fromint, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_toint, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_fastpipe, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_fma, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_div, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_sqrt, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_wflags, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_vec, // @[util.scala:463:14]
input [4:0] io_enq_bits_uop_rob_idx, // @[util.scala:463:14]
input [3:0] io_enq_bits_uop_ldq_idx, // @[util.scala:463:14]
input [3:0] io_enq_bits_uop_stq_idx, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_rxq_idx, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_pdst, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_prs1, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_prs2, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_prs3, // @[util.scala:463:14]
input [3:0] io_enq_bits_uop_ppred, // @[util.scala:463:14]
input io_enq_bits_uop_prs1_busy, // @[util.scala:463:14]
input io_enq_bits_uop_prs2_busy, // @[util.scala:463:14]
input io_enq_bits_uop_prs3_busy, // @[util.scala:463:14]
input io_enq_bits_uop_ppred_busy, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_stale_pdst, // @[util.scala:463:14]
input io_enq_bits_uop_exception, // @[util.scala:463:14]
input [63:0] io_enq_bits_uop_exc_cause, // @[util.scala:463:14]
input [4:0] io_enq_bits_uop_mem_cmd, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_mem_size, // @[util.scala:463:14]
input io_enq_bits_uop_mem_signed, // @[util.scala:463:14]
input io_enq_bits_uop_uses_ldq, // @[util.scala:463:14]
input io_enq_bits_uop_uses_stq, // @[util.scala:463:14]
input io_enq_bits_uop_is_unique, // @[util.scala:463:14]
input io_enq_bits_uop_flush_on_commit, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_csr_cmd, // @[util.scala:463:14]
input io_enq_bits_uop_ldst_is_rs1, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_ldst, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_lrs1, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_lrs2, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_lrs3, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_dst_rtype, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_lrs1_rtype, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_lrs2_rtype, // @[util.scala:463:14]
input io_enq_bits_uop_frs3_en, // @[util.scala:463:14]
input io_enq_bits_uop_fcn_dw, // @[util.scala:463:14]
input [4:0] io_enq_bits_uop_fcn_op, // @[util.scala:463:14]
input io_enq_bits_uop_fp_val, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_fp_rm, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_fp_typ, // @[util.scala:463:14]
input io_enq_bits_uop_xcpt_pf_if, // @[util.scala:463:14]
input io_enq_bits_uop_xcpt_ae_if, // @[util.scala:463:14]
input io_enq_bits_uop_xcpt_ma_if, // @[util.scala:463:14]
input io_enq_bits_uop_bp_debug_if, // @[util.scala:463:14]
input io_enq_bits_uop_bp_xcpt_if, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_debug_fsrc, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_debug_tsrc, // @[util.scala:463:14]
input [33:0] io_enq_bits_addr, // @[util.scala:463:14]
input [63:0] io_enq_bits_data, // @[util.scala:463:14]
input io_enq_bits_is_hella, // @[util.scala:463:14]
input io_enq_bits_tag_match, // @[util.scala:463:14]
input [1:0] io_enq_bits_old_meta_coh_state, // @[util.scala:463:14]
input [21:0] io_enq_bits_old_meta_tag, // @[util.scala:463:14]
input [1:0] io_enq_bits_way_en, // @[util.scala:463:14]
input [4:0] io_enq_bits_sdq_id, // @[util.scala:463:14]
input io_deq_ready, // @[util.scala:463:14]
output io_deq_valid, // @[util.scala:463:14]
output [31:0] io_deq_bits_uop_inst, // @[util.scala:463:14]
output [31:0] io_deq_bits_uop_debug_inst, // @[util.scala:463:14]
output io_deq_bits_uop_is_rvc, // @[util.scala:463:14]
output [33:0] io_deq_bits_uop_debug_pc, // @[util.scala:463:14]
output io_deq_bits_uop_iq_type_0, // @[util.scala:463:14]
output io_deq_bits_uop_iq_type_1, // @[util.scala:463:14]
output io_deq_bits_uop_iq_type_2, // @[util.scala:463:14]
output io_deq_bits_uop_iq_type_3, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_0, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_1, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_2, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_3, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_4, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_5, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_6, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_7, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_8, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_9, // @[util.scala:463:14]
output io_deq_bits_uop_iw_issued, // @[util.scala:463:14]
output io_deq_bits_uop_iw_issued_partial_agen, // @[util.scala:463:14]
output io_deq_bits_uop_iw_issued_partial_dgen, // @[util.scala:463:14]
output io_deq_bits_uop_iw_p1_speculative_child, // @[util.scala:463:14]
output io_deq_bits_uop_iw_p2_speculative_child, // @[util.scala:463:14]
output io_deq_bits_uop_iw_p1_bypass_hint, // @[util.scala:463:14]
output io_deq_bits_uop_iw_p2_bypass_hint, // @[util.scala:463:14]
output io_deq_bits_uop_iw_p3_bypass_hint, // @[util.scala:463:14]
output io_deq_bits_uop_dis_col_sel, // @[util.scala:463:14]
output [3:0] io_deq_bits_uop_br_mask, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_br_tag, // @[util.scala:463:14]
output [3:0] io_deq_bits_uop_br_type, // @[util.scala:463:14]
output io_deq_bits_uop_is_sfb, // @[util.scala:463:14]
output io_deq_bits_uop_is_fence, // @[util.scala:463:14]
output io_deq_bits_uop_is_fencei, // @[util.scala:463:14]
output io_deq_bits_uop_is_sfence, // @[util.scala:463:14]
output io_deq_bits_uop_is_amo, // @[util.scala:463:14]
output io_deq_bits_uop_is_eret, // @[util.scala:463:14]
output io_deq_bits_uop_is_sys_pc2epc, // @[util.scala:463:14]
output io_deq_bits_uop_is_rocc, // @[util.scala:463:14]
output io_deq_bits_uop_is_mov, // @[util.scala:463:14]
output [3:0] io_deq_bits_uop_ftq_idx, // @[util.scala:463:14]
output io_deq_bits_uop_edge_inst, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_pc_lob, // @[util.scala:463:14]
output io_deq_bits_uop_taken, // @[util.scala:463:14]
output io_deq_bits_uop_imm_rename, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_imm_sel, // @[util.scala:463:14]
output [4:0] io_deq_bits_uop_pimm, // @[util.scala:463:14]
output [19:0] io_deq_bits_uop_imm_packed, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_op1_sel, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_op2_sel, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_ldst, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_wen, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_ren1, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_ren2, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_ren3, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_swap12, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_swap23, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_fp_ctrl_typeTagIn, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_fp_ctrl_typeTagOut, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_fromint, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_toint, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_fastpipe, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_fma, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_div, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_sqrt, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_wflags, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_vec, // @[util.scala:463:14]
output [4:0] io_deq_bits_uop_rob_idx, // @[util.scala:463:14]
output [3:0] io_deq_bits_uop_ldq_idx, // @[util.scala:463:14]
output [3:0] io_deq_bits_uop_stq_idx, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_rxq_idx, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_pdst, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_prs1, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_prs2, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_prs3, // @[util.scala:463:14]
output [3:0] io_deq_bits_uop_ppred, // @[util.scala:463:14]
output io_deq_bits_uop_prs1_busy, // @[util.scala:463:14]
output io_deq_bits_uop_prs2_busy, // @[util.scala:463:14]
output io_deq_bits_uop_prs3_busy, // @[util.scala:463:14]
output io_deq_bits_uop_ppred_busy, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_stale_pdst, // @[util.scala:463:14]
output io_deq_bits_uop_exception, // @[util.scala:463:14]
output [63:0] io_deq_bits_uop_exc_cause, // @[util.scala:463:14]
output [4:0] io_deq_bits_uop_mem_cmd, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_mem_size, // @[util.scala:463:14]
output io_deq_bits_uop_mem_signed, // @[util.scala:463:14]
output io_deq_bits_uop_uses_ldq, // @[util.scala:463:14]
output io_deq_bits_uop_uses_stq, // @[util.scala:463:14]
output io_deq_bits_uop_is_unique, // @[util.scala:463:14]
output io_deq_bits_uop_flush_on_commit, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_csr_cmd, // @[util.scala:463:14]
output io_deq_bits_uop_ldst_is_rs1, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_ldst, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_lrs1, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_lrs2, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_lrs3, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_dst_rtype, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_lrs1_rtype, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_lrs2_rtype, // @[util.scala:463:14]
output io_deq_bits_uop_frs3_en, // @[util.scala:463:14]
output io_deq_bits_uop_fcn_dw, // @[util.scala:463:14]
output [4:0] io_deq_bits_uop_fcn_op, // @[util.scala:463:14]
output io_deq_bits_uop_fp_val, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_fp_rm, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_fp_typ, // @[util.scala:463:14]
output io_deq_bits_uop_xcpt_pf_if, // @[util.scala:463:14]
output io_deq_bits_uop_xcpt_ae_if, // @[util.scala:463:14]
output io_deq_bits_uop_xcpt_ma_if, // @[util.scala:463:14]
output io_deq_bits_uop_bp_debug_if, // @[util.scala:463:14]
output io_deq_bits_uop_bp_xcpt_if, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_debug_fsrc, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_debug_tsrc, // @[util.scala:463:14]
output [33:0] io_deq_bits_addr, // @[util.scala:463:14]
output [63:0] io_deq_bits_data, // @[util.scala:463:14]
output io_deq_bits_is_hella, // @[util.scala:463:14]
output io_deq_bits_tag_match, // @[util.scala:463:14]
output [1:0] io_deq_bits_old_meta_coh_state, // @[util.scala:463:14]
output [21:0] io_deq_bits_old_meta_tag, // @[util.scala:463:14]
output [1:0] io_deq_bits_way_en, // @[util.scala:463:14]
output [4:0] io_deq_bits_sdq_id, // @[util.scala:463:14]
output io_empty, // @[util.scala:463:14]
output [3:0] io_count // @[util.scala:463:14]
);
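  // Internal aliases of the enqueue-side inputs: the emitter copies each io_enq_* port into a matching *_0 wire.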
wire [130:0] _ram_ext_R0_data; // @[util.scala:503:22]
wire io_enq_valid_0 = io_enq_valid; // @[util.scala:458:7]
wire [31:0] io_enq_bits_uop_inst_0 = io_enq_bits_uop_inst; // @[util.scala:458:7]
wire [31:0] io_enq_bits_uop_debug_inst_0 = io_enq_bits_uop_debug_inst; // @[util.scala:458:7]
wire io_enq_bits_uop_is_rvc_0 = io_enq_bits_uop_is_rvc; // @[util.scala:458:7]
wire [33:0] io_enq_bits_uop_debug_pc_0 = io_enq_bits_uop_debug_pc; // @[util.scala:458:7]
wire io_enq_bits_uop_iq_type_0_0 = io_enq_bits_uop_iq_type_0; // @[util.scala:458:7]
wire io_enq_bits_uop_iq_type_1_0 = io_enq_bits_uop_iq_type_1; // @[util.scala:458:7]
wire io_enq_bits_uop_iq_type_2_0 = io_enq_bits_uop_iq_type_2; // @[util.scala:458:7]
wire io_enq_bits_uop_iq_type_3_0 = io_enq_bits_uop_iq_type_3; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_0_0 = io_enq_bits_uop_fu_code_0; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_1_0 = io_enq_bits_uop_fu_code_1; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_2_0 = io_enq_bits_uop_fu_code_2; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_3_0 = io_enq_bits_uop_fu_code_3; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_4_0 = io_enq_bits_uop_fu_code_4; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_5_0 = io_enq_bits_uop_fu_code_5; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_6_0 = io_enq_bits_uop_fu_code_6; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_7_0 = io_enq_bits_uop_fu_code_7; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_8_0 = io_enq_bits_uop_fu_code_8; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_9_0 = io_enq_bits_uop_fu_code_9; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_issued_0 = io_enq_bits_uop_iw_issued; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_issued_partial_agen_0 = io_enq_bits_uop_iw_issued_partial_agen; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_issued_partial_dgen_0 = io_enq_bits_uop_iw_issued_partial_dgen; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_p1_speculative_child_0 = io_enq_bits_uop_iw_p1_speculative_child; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_p2_speculative_child_0 = io_enq_bits_uop_iw_p2_speculative_child; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_p1_bypass_hint_0 = io_enq_bits_uop_iw_p1_bypass_hint; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_p2_bypass_hint_0 = io_enq_bits_uop_iw_p2_bypass_hint; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_p3_bypass_hint_0 = io_enq_bits_uop_iw_p3_bypass_hint; // @[util.scala:458:7]
wire io_enq_bits_uop_dis_col_sel_0 = io_enq_bits_uop_dis_col_sel; // @[util.scala:458:7]
wire [3:0] io_enq_bits_uop_br_mask_0 = io_enq_bits_uop_br_mask; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_br_tag_0 = io_enq_bits_uop_br_tag; // @[util.scala:458:7]
wire [3:0] io_enq_bits_uop_br_type_0 = io_enq_bits_uop_br_type; // @[util.scala:458:7]
wire io_enq_bits_uop_is_sfb_0 = io_enq_bits_uop_is_sfb; // @[util.scala:458:7]
wire io_enq_bits_uop_is_fence_0 = io_enq_bits_uop_is_fence; // @[util.scala:458:7]
wire io_enq_bits_uop_is_fencei_0 = io_enq_bits_uop_is_fencei; // @[util.scala:458:7]
wire io_enq_bits_uop_is_sfence_0 = io_enq_bits_uop_is_sfence; // @[util.scala:458:7]
wire io_enq_bits_uop_is_amo_0 = io_enq_bits_uop_is_amo; // @[util.scala:458:7]
wire io_enq_bits_uop_is_eret_0 = io_enq_bits_uop_is_eret; // @[util.scala:458:7]
wire io_enq_bits_uop_is_sys_pc2epc_0 = io_enq_bits_uop_is_sys_pc2epc; // @[util.scala:458:7]
wire io_enq_bits_uop_is_rocc_0 = io_enq_bits_uop_is_rocc; // @[util.scala:458:7]
wire io_enq_bits_uop_is_mov_0 = io_enq_bits_uop_is_mov; // @[util.scala:458:7]
wire [3:0] io_enq_bits_uop_ftq_idx_0 = io_enq_bits_uop_ftq_idx; // @[util.scala:458:7]
wire io_enq_bits_uop_edge_inst_0 = io_enq_bits_uop_edge_inst; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_pc_lob_0 = io_enq_bits_uop_pc_lob; // @[util.scala:458:7]
wire io_enq_bits_uop_taken_0 = io_enq_bits_uop_taken; // @[util.scala:458:7]
wire io_enq_bits_uop_imm_rename_0 = io_enq_bits_uop_imm_rename; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_imm_sel_0 = io_enq_bits_uop_imm_sel; // @[util.scala:458:7]
wire [4:0] io_enq_bits_uop_pimm_0 = io_enq_bits_uop_pimm; // @[util.scala:458:7]
wire [19:0] io_enq_bits_uop_imm_packed_0 = io_enq_bits_uop_imm_packed; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_op1_sel_0 = io_enq_bits_uop_op1_sel; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_op2_sel_0 = io_enq_bits_uop_op2_sel; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_ldst_0 = io_enq_bits_uop_fp_ctrl_ldst; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_wen_0 = io_enq_bits_uop_fp_ctrl_wen; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_ren1_0 = io_enq_bits_uop_fp_ctrl_ren1; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_ren2_0 = io_enq_bits_uop_fp_ctrl_ren2; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_ren3_0 = io_enq_bits_uop_fp_ctrl_ren3; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_swap12_0 = io_enq_bits_uop_fp_ctrl_swap12; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_swap23_0 = io_enq_bits_uop_fp_ctrl_swap23; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_fp_ctrl_typeTagIn_0 = io_enq_bits_uop_fp_ctrl_typeTagIn; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_fp_ctrl_typeTagOut_0 = io_enq_bits_uop_fp_ctrl_typeTagOut; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_fromint_0 = io_enq_bits_uop_fp_ctrl_fromint; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_toint_0 = io_enq_bits_uop_fp_ctrl_toint; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_fastpipe_0 = io_enq_bits_uop_fp_ctrl_fastpipe; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_fma_0 = io_enq_bits_uop_fp_ctrl_fma; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_div_0 = io_enq_bits_uop_fp_ctrl_div; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_sqrt_0 = io_enq_bits_uop_fp_ctrl_sqrt; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_wflags_0 = io_enq_bits_uop_fp_ctrl_wflags; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_vec_0 = io_enq_bits_uop_fp_ctrl_vec; // @[util.scala:458:7]
wire [4:0] io_enq_bits_uop_rob_idx_0 = io_enq_bits_uop_rob_idx; // @[util.scala:458:7]
wire [3:0] io_enq_bits_uop_ldq_idx_0 = io_enq_bits_uop_ldq_idx; // @[util.scala:458:7]
wire [3:0] io_enq_bits_uop_stq_idx_0 = io_enq_bits_uop_stq_idx; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_rxq_idx_0 = io_enq_bits_uop_rxq_idx; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_pdst_0 = io_enq_bits_uop_pdst; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_prs1_0 = io_enq_bits_uop_prs1; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_prs2_0 = io_enq_bits_uop_prs2; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_prs3_0 = io_enq_bits_uop_prs3; // @[util.scala:458:7]
wire [3:0] io_enq_bits_uop_ppred_0 = io_enq_bits_uop_ppred; // @[util.scala:458:7]
wire io_enq_bits_uop_prs1_busy_0 = io_enq_bits_uop_prs1_busy; // @[util.scala:458:7]
wire io_enq_bits_uop_prs2_busy_0 = io_enq_bits_uop_prs2_busy; // @[util.scala:458:7]
wire io_enq_bits_uop_prs3_busy_0 = io_enq_bits_uop_prs3_busy; // @[util.scala:458:7]
wire io_enq_bits_uop_ppred_busy_0 = io_enq_bits_uop_ppred_busy; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_stale_pdst_0 = io_enq_bits_uop_stale_pdst; // @[util.scala:458:7]
wire io_enq_bits_uop_exception_0 = io_enq_bits_uop_exception; // @[util.scala:458:7]
wire [63:0] io_enq_bits_uop_exc_cause_0 = io_enq_bits_uop_exc_cause; // @[util.scala:458:7]
wire [4:0] io_enq_bits_uop_mem_cmd_0 = io_enq_bits_uop_mem_cmd; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_mem_size_0 = io_enq_bits_uop_mem_size; // @[util.scala:458:7]
wire io_enq_bits_uop_mem_signed_0 = io_enq_bits_uop_mem_signed; // @[util.scala:458:7]
wire io_enq_bits_uop_uses_ldq_0 = io_enq_bits_uop_uses_ldq; // @[util.scala:458:7]
wire io_enq_bits_uop_uses_stq_0 = io_enq_bits_uop_uses_stq; // @[util.scala:458:7]
wire io_enq_bits_uop_is_unique_0 = io_enq_bits_uop_is_unique; // @[util.scala:458:7]
wire io_enq_bits_uop_flush_on_commit_0 = io_enq_bits_uop_flush_on_commit; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_csr_cmd_0 = io_enq_bits_uop_csr_cmd; // @[util.scala:458:7]
wire io_enq_bits_uop_ldst_is_rs1_0 = io_enq_bits_uop_ldst_is_rs1; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_ldst_0 = io_enq_bits_uop_ldst; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_lrs1_0 = io_enq_bits_uop_lrs1; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_lrs2_0 = io_enq_bits_uop_lrs2; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_lrs3_0 = io_enq_bits_uop_lrs3; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_dst_rtype_0 = io_enq_bits_uop_dst_rtype; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_lrs1_rtype_0 = io_enq_bits_uop_lrs1_rtype; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_lrs2_rtype_0 = io_enq_bits_uop_lrs2_rtype; // @[util.scala:458:7]
wire io_enq_bits_uop_frs3_en_0 = io_enq_bits_uop_frs3_en; // @[util.scala:458:7]
wire io_enq_bits_uop_fcn_dw_0 = io_enq_bits_uop_fcn_dw; // @[util.scala:458:7]
wire [4:0] io_enq_bits_uop_fcn_op_0 = io_enq_bits_uop_fcn_op; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_val_0 = io_enq_bits_uop_fp_val; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_fp_rm_0 = io_enq_bits_uop_fp_rm; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_fp_typ_0 = io_enq_bits_uop_fp_typ; // @[util.scala:458:7]
wire io_enq_bits_uop_xcpt_pf_if_0 = io_enq_bits_uop_xcpt_pf_if; // @[util.scala:458:7]
wire io_enq_bits_uop_xcpt_ae_if_0 = io_enq_bits_uop_xcpt_ae_if; // @[util.scala:458:7]
wire io_enq_bits_uop_xcpt_ma_if_0 = io_enq_bits_uop_xcpt_ma_if; // @[util.scala:458:7]
wire io_enq_bits_uop_bp_debug_if_0 = io_enq_bits_uop_bp_debug_if; // @[util.scala:458:7]
wire io_enq_bits_uop_bp_xcpt_if_0 = io_enq_bits_uop_bp_xcpt_if; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_debug_fsrc_0 = io_enq_bits_uop_debug_fsrc; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_debug_tsrc_0 = io_enq_bits_uop_debug_tsrc; // @[util.scala:458:7]
wire [33:0] io_enq_bits_addr_0 = io_enq_bits_addr; // @[util.scala:458:7]
wire [63:0] io_enq_bits_data_0 = io_enq_bits_data; // @[util.scala:458:7]
wire io_enq_bits_is_hella_0 = io_enq_bits_is_hella; // @[util.scala:458:7]
wire io_enq_bits_tag_match_0 = io_enq_bits_tag_match; // @[util.scala:458:7]
wire [1:0] io_enq_bits_old_meta_coh_state_0 = io_enq_bits_old_meta_coh_state; // @[util.scala:458:7]
wire [21:0] io_enq_bits_old_meta_tag_0 = io_enq_bits_old_meta_tag; // @[util.scala:458:7]
wire [1:0] io_enq_bits_way_en_0 = io_enq_bits_way_en; // @[util.scala:458:7]
wire [4:0] io_enq_bits_sdq_id_0 = io_enq_bits_sdq_id; // @[util.scala:458:7]
wire io_deq_ready_0 = io_deq_ready; // @[util.scala:458:7]
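  // The branch-kill and flush qualifier terms below fold to constants because io_flush and the
  // io_brupdate inputs of this queue instance are tied to zero (see the constant wires further down).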
wire _do_enq_T_4 = 1'h1; // @[util.scala:514:42]
wire _do_enq_T_7 = 1'h1; // @[util.scala:514:102]
wire _valids_0_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_0_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_1_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_1_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_2_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_2_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_3_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_3_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_4_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_4_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_5_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_5_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_6_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_6_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_7_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_7_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_8_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_8_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_9_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_9_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_10_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_10_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_11_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_11_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_12_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_12_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_13_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_13_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_14_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_14_T_6 = 1'h1; // @[util.scala:520:83]
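  // All-ones masks used when updating each entry's br_mask; with the resolve mask tied to zero,
  // the masking leaves each stored br_mask unchanged.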
wire [3:0] _uops_0_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_1_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_2_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_3_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_4_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_5_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_6_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_7_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_8_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_9_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_10_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_11_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_12_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_13_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_14_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_br_mask_T = 4'hF; // @[util.scala:93:27]
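  // io_brupdate fields of this instance, lowered to constant-zero local wires (the ports are tied off).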
wire [20:0] io_brupdate_b2_target_offset = 21'h0; // @[util.scala:458:7, :463:14]
wire [63:0] io_brupdate_b2_uop_exc_cause = 64'h0; // @[util.scala:458:7, :463:14]
wire [19:0] io_brupdate_b2_uop_imm_packed = 20'h0; // @[util.scala:458:7, :463:14]
wire [4:0] io_brupdate_b2_uop_pimm = 5'h0; // @[util.scala:458:7, :463:14]
wire [4:0] io_brupdate_b2_uop_rob_idx = 5'h0; // @[util.scala:458:7, :463:14]
wire [4:0] io_brupdate_b2_uop_mem_cmd = 5'h0; // @[util.scala:458:7, :463:14]
wire [4:0] io_brupdate_b2_uop_fcn_op = 5'h0; // @[util.scala:458:7, :463:14]
wire [2:0] io_brupdate_b2_uop_imm_sel = 3'h0; // @[util.scala:458:7, :463:14]
wire [2:0] io_brupdate_b2_uop_op2_sel = 3'h0; // @[util.scala:458:7, :463:14]
wire [2:0] io_brupdate_b2_uop_csr_cmd = 3'h0; // @[util.scala:458:7, :463:14]
wire [2:0] io_brupdate_b2_uop_fp_rm = 3'h0; // @[util.scala:458:7, :463:14]
wire [2:0] io_brupdate_b2_uop_debug_fsrc = 3'h0; // @[util.scala:458:7, :463:14]
wire [2:0] io_brupdate_b2_uop_debug_tsrc = 3'h0; // @[util.scala:458:7, :463:14]
wire [2:0] io_brupdate_b2_cfi_type = 3'h0; // @[util.scala:458:7, :463:14]
wire [5:0] io_brupdate_b2_uop_pc_lob = 6'h0; // @[util.scala:458:7, :463:14]
wire [5:0] io_brupdate_b2_uop_pdst = 6'h0; // @[util.scala:458:7, :463:14]
wire [5:0] io_brupdate_b2_uop_prs1 = 6'h0; // @[util.scala:458:7, :463:14]
wire [5:0] io_brupdate_b2_uop_prs2 = 6'h0; // @[util.scala:458:7, :463:14]
wire [5:0] io_brupdate_b2_uop_prs3 = 6'h0; // @[util.scala:458:7, :463:14]
wire [5:0] io_brupdate_b2_uop_stale_pdst = 6'h0; // @[util.scala:458:7, :463:14]
wire [5:0] io_brupdate_b2_uop_ldst = 6'h0; // @[util.scala:458:7, :463:14]
wire [5:0] io_brupdate_b2_uop_lrs1 = 6'h0; // @[util.scala:458:7, :463:14]
wire [5:0] io_brupdate_b2_uop_lrs2 = 6'h0; // @[util.scala:458:7, :463:14]
wire [5:0] io_brupdate_b2_uop_lrs3 = 6'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_uop_br_tag = 2'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_uop_op1_sel = 2'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagIn = 2'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagOut = 2'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_uop_rxq_idx = 2'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_uop_mem_size = 2'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_uop_dst_rtype = 2'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_uop_lrs1_rtype = 2'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_uop_lrs2_rtype = 2'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_uop_fp_typ = 2'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_pc_sel = 2'h0; // @[util.scala:458:7, :463:14]
wire [33:0] io_brupdate_b2_uop_debug_pc = 34'h0; // @[util.scala:458:7, :463:14]
wire [33:0] io_brupdate_b2_jalr_target = 34'h0; // @[util.scala:458:7, :463:14]
wire io_brupdate_b2_uop_is_rvc = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iq_type_0 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iq_type_1 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iq_type_2 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iq_type_3 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_0 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_1 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_2 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_3 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_4 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_5 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_6 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_7 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_8 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_9 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_issued = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_issued_partial_agen = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_issued_partial_dgen = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_p1_speculative_child = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_p2_speculative_child = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_p1_bypass_hint = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_p2_bypass_hint = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_p3_bypass_hint = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_dis_col_sel = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_sfb = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_fence = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_fencei = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_sfence = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_amo = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_eret = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_sys_pc2epc = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_rocc = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_mov = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_edge_inst = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_taken = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_imm_rename = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_ldst = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_wen = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_ren1 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_ren2 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_ren3 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_swap12 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_swap23 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_fromint = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_toint = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_fastpipe = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_fma = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_div = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_sqrt = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_wflags = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_vec = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_prs1_busy = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_prs2_busy = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_prs3_busy = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_ppred_busy = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_exception = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_mem_signed = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_uses_ldq = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_uses_stq = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_unique = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_flush_on_commit = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_ldst_is_rs1 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_frs3_en = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fcn_dw = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_val = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_xcpt_pf_if = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_xcpt_ae_if = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_xcpt_ma_if = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_bp_debug_if = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_bp_xcpt_if = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_mispredict = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_taken = 1'h0; // @[util.scala:458:7]
wire io_flush = 1'h0; // @[util.scala:458:7]
wire _valids_WIRE_0 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_1 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_2 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_3 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_4 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_5 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_6 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_7 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_8 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_9 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_10 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_11 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_12 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_13 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_14 = 1'h0; // @[util.scala:504:34]
wire _do_enq_T_2 = 1'h0; // @[util.scala:126:59]
wire _do_enq_T_3 = 1'h0; // @[util.scala:61:61]
wire _do_enq_T_6 = 1'h0; // @[util.scala:514:113]
wire _valids_0_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_0_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_0_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_1_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_1_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_1_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_2_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_2_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_2_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_3_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_3_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_3_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_4_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_4_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_4_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_5_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_5_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_5_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_6_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_6_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_6_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_7_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_7_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_7_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_8_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_8_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_8_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_9_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_9_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_9_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_10_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_10_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_10_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_11_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_11_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_11_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_12_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_12_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_12_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_13_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_13_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_13_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_14_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_14_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_14_T_5 = 1'h0; // @[util.scala:520:94]
wire [31:0] io_brupdate_b2_uop_inst = 32'h0; // @[util.scala:458:7, :463:14]
wire [31:0] io_brupdate_b2_uop_debug_inst = 32'h0; // @[util.scala:458:7, :463:14]
wire [3:0] io_brupdate_b1_resolve_mask = 4'h0; // @[util.scala:458:7]
wire [3:0] io_brupdate_b1_mispredict_mask = 4'h0; // @[util.scala:458:7]
wire [3:0] io_brupdate_b2_uop_br_mask = 4'h0; // @[util.scala:458:7]
wire [3:0] io_brupdate_b2_uop_br_type = 4'h0; // @[util.scala:458:7]
wire [3:0] io_brupdate_b2_uop_ftq_idx = 4'h0; // @[util.scala:458:7]
wire [3:0] io_brupdate_b2_uop_ldq_idx = 4'h0; // @[util.scala:458:7]
wire [3:0] io_brupdate_b2_uop_stq_idx = 4'h0; // @[util.scala:458:7]
wire [3:0] io_brupdate_b2_uop_ppred = 4'h0; // @[util.scala:458:7]
wire [3:0] _do_enq_T_1 = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_0_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_1_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_2_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_3_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_4_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_5_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_6_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_7_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_8_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_9_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_10_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_11_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_12_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_13_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_14_T = 4'h0; // @[util.scala:126:51]
wire _io_enq_ready_T; // @[util.scala:543:21]
wire [3:0] _uops_br_mask_T_1 = io_enq_bits_uop_br_mask_0; // @[util.scala:93:25, :458:7]
wire _io_deq_valid_T_1; // @[util.scala:548:42]
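  // out_* is the wire bundle holding the entry selected for dequeue (util.scala:545); its non-uop
  // payload fields are unpacked from the 131-bit RAM read word further below.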
wire [31:0] out_uop_inst; // @[util.scala:545:19]
wire [31:0] out_uop_debug_inst; // @[util.scala:545:19]
wire out_uop_is_rvc; // @[util.scala:545:19]
wire [33:0] out_uop_debug_pc; // @[util.scala:545:19]
wire out_uop_iq_type_0; // @[util.scala:545:19]
wire out_uop_iq_type_1; // @[util.scala:545:19]
wire out_uop_iq_type_2; // @[util.scala:545:19]
wire out_uop_iq_type_3; // @[util.scala:545:19]
wire out_uop_fu_code_0; // @[util.scala:545:19]
wire out_uop_fu_code_1; // @[util.scala:545:19]
wire out_uop_fu_code_2; // @[util.scala:545:19]
wire out_uop_fu_code_3; // @[util.scala:545:19]
wire out_uop_fu_code_4; // @[util.scala:545:19]
wire out_uop_fu_code_5; // @[util.scala:545:19]
wire out_uop_fu_code_6; // @[util.scala:545:19]
wire out_uop_fu_code_7; // @[util.scala:545:19]
wire out_uop_fu_code_8; // @[util.scala:545:19]
wire out_uop_fu_code_9; // @[util.scala:545:19]
wire out_uop_iw_issued; // @[util.scala:545:19]
wire out_uop_iw_issued_partial_agen; // @[util.scala:545:19]
wire out_uop_iw_issued_partial_dgen; // @[util.scala:545:19]
wire out_uop_iw_p1_speculative_child; // @[util.scala:545:19]
wire out_uop_iw_p2_speculative_child; // @[util.scala:545:19]
wire out_uop_iw_p1_bypass_hint; // @[util.scala:545:19]
wire out_uop_iw_p2_bypass_hint; // @[util.scala:545:19]
wire out_uop_iw_p3_bypass_hint; // @[util.scala:545:19]
wire out_uop_dis_col_sel; // @[util.scala:545:19]
wire [3:0] out_uop_br_mask; // @[util.scala:545:19]
wire [1:0] out_uop_br_tag; // @[util.scala:545:19]
wire [3:0] out_uop_br_type; // @[util.scala:545:19]
wire out_uop_is_sfb; // @[util.scala:545:19]
wire out_uop_is_fence; // @[util.scala:545:19]
wire out_uop_is_fencei; // @[util.scala:545:19]
wire out_uop_is_sfence; // @[util.scala:545:19]
wire out_uop_is_amo; // @[util.scala:545:19]
wire out_uop_is_eret; // @[util.scala:545:19]
wire out_uop_is_sys_pc2epc; // @[util.scala:545:19]
wire out_uop_is_rocc; // @[util.scala:545:19]
wire out_uop_is_mov; // @[util.scala:545:19]
wire [3:0] out_uop_ftq_idx; // @[util.scala:545:19]
wire out_uop_edge_inst; // @[util.scala:545:19]
wire [5:0] out_uop_pc_lob; // @[util.scala:545:19]
wire out_uop_taken; // @[util.scala:545:19]
wire out_uop_imm_rename; // @[util.scala:545:19]
wire [2:0] out_uop_imm_sel; // @[util.scala:545:19]
wire [4:0] out_uop_pimm; // @[util.scala:545:19]
wire [19:0] out_uop_imm_packed; // @[util.scala:545:19]
wire [1:0] out_uop_op1_sel; // @[util.scala:545:19]
wire [2:0] out_uop_op2_sel; // @[util.scala:545:19]
wire out_uop_fp_ctrl_ldst; // @[util.scala:545:19]
wire out_uop_fp_ctrl_wen; // @[util.scala:545:19]
wire out_uop_fp_ctrl_ren1; // @[util.scala:545:19]
wire out_uop_fp_ctrl_ren2; // @[util.scala:545:19]
wire out_uop_fp_ctrl_ren3; // @[util.scala:545:19]
wire out_uop_fp_ctrl_swap12; // @[util.scala:545:19]
wire out_uop_fp_ctrl_swap23; // @[util.scala:545:19]
wire [1:0] out_uop_fp_ctrl_typeTagIn; // @[util.scala:545:19]
wire [1:0] out_uop_fp_ctrl_typeTagOut; // @[util.scala:545:19]
wire out_uop_fp_ctrl_fromint; // @[util.scala:545:19]
wire out_uop_fp_ctrl_toint; // @[util.scala:545:19]
wire out_uop_fp_ctrl_fastpipe; // @[util.scala:545:19]
wire out_uop_fp_ctrl_fma; // @[util.scala:545:19]
wire out_uop_fp_ctrl_div; // @[util.scala:545:19]
wire out_uop_fp_ctrl_sqrt; // @[util.scala:545:19]
wire out_uop_fp_ctrl_wflags; // @[util.scala:545:19]
wire out_uop_fp_ctrl_vec; // @[util.scala:545:19]
wire [4:0] out_uop_rob_idx; // @[util.scala:545:19]
wire [3:0] out_uop_ldq_idx; // @[util.scala:545:19]
wire [3:0] out_uop_stq_idx; // @[util.scala:545:19]
wire [1:0] out_uop_rxq_idx; // @[util.scala:545:19]
wire [5:0] out_uop_pdst; // @[util.scala:545:19]
wire [5:0] out_uop_prs1; // @[util.scala:545:19]
wire [5:0] out_uop_prs2; // @[util.scala:545:19]
wire [5:0] out_uop_prs3; // @[util.scala:545:19]
wire [3:0] out_uop_ppred; // @[util.scala:545:19]
wire out_uop_prs1_busy; // @[util.scala:545:19]
wire out_uop_prs2_busy; // @[util.scala:545:19]
wire out_uop_prs3_busy; // @[util.scala:545:19]
wire out_uop_ppred_busy; // @[util.scala:545:19]
wire [5:0] out_uop_stale_pdst; // @[util.scala:545:19]
wire out_uop_exception; // @[util.scala:545:19]
wire [63:0] out_uop_exc_cause; // @[util.scala:545:19]
wire [4:0] out_uop_mem_cmd; // @[util.scala:545:19]
wire [1:0] out_uop_mem_size; // @[util.scala:545:19]
wire out_uop_mem_signed; // @[util.scala:545:19]
wire out_uop_uses_ldq; // @[util.scala:545:19]
wire out_uop_uses_stq; // @[util.scala:545:19]
wire out_uop_is_unique; // @[util.scala:545:19]
wire out_uop_flush_on_commit; // @[util.scala:545:19]
wire [2:0] out_uop_csr_cmd; // @[util.scala:545:19]
wire out_uop_ldst_is_rs1; // @[util.scala:545:19]
wire [5:0] out_uop_ldst; // @[util.scala:545:19]
wire [5:0] out_uop_lrs1; // @[util.scala:545:19]
wire [5:0] out_uop_lrs2; // @[util.scala:545:19]
wire [5:0] out_uop_lrs3; // @[util.scala:545:19]
wire [1:0] out_uop_dst_rtype; // @[util.scala:545:19]
wire [1:0] out_uop_lrs1_rtype; // @[util.scala:545:19]
wire [1:0] out_uop_lrs2_rtype; // @[util.scala:545:19]
wire out_uop_frs3_en; // @[util.scala:545:19]
wire out_uop_fcn_dw; // @[util.scala:545:19]
wire [4:0] out_uop_fcn_op; // @[util.scala:545:19]
wire out_uop_fp_val; // @[util.scala:545:19]
wire [2:0] out_uop_fp_rm; // @[util.scala:545:19]
wire [1:0] out_uop_fp_typ; // @[util.scala:545:19]
wire out_uop_xcpt_pf_if; // @[util.scala:545:19]
wire out_uop_xcpt_ae_if; // @[util.scala:545:19]
wire out_uop_xcpt_ma_if; // @[util.scala:545:19]
wire out_uop_bp_debug_if; // @[util.scala:545:19]
wire out_uop_bp_xcpt_if; // @[util.scala:545:19]
wire [2:0] out_uop_debug_fsrc; // @[util.scala:545:19]
wire [2:0] out_uop_debug_tsrc; // @[util.scala:545:19]
wire [33:0] out_addr; // @[util.scala:545:19]
wire [63:0] out_data; // @[util.scala:545:19]
wire out_is_hella; // @[util.scala:545:19]
wire out_tag_match; // @[util.scala:545:19]
wire [1:0] out_old_meta_coh_state; // @[util.scala:545:19]
wire [21:0] out_old_meta_tag; // @[util.scala:545:19]
wire [1:0] out_way_en; // @[util.scala:545:19]
wire [4:0] out_sdq_id; // @[util.scala:545:19]
wire _io_empty_T_1; // @[util.scala:512:27]
wire [3:0] _io_count_T_5; // @[util.scala:556:22]
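  // Forward-declared staging wires for the module outputs; each io_*_0 is driven later and
  // connected to the corresponding output port.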
wire io_enq_ready_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iq_type_0_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iq_type_1_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iq_type_2_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iq_type_3_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_0_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_1_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_2_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_3_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_4_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_5_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_6_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_7_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_8_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_9_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7]
wire [31:0] io_deq_bits_uop_inst_0; // @[util.scala:458:7]
wire [31:0] io_deq_bits_uop_debug_inst_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_rvc_0; // @[util.scala:458:7]
wire [33:0] io_deq_bits_uop_debug_pc_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_issued_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7]
wire io_deq_bits_uop_dis_col_sel_0; // @[util.scala:458:7]
wire [3:0] io_deq_bits_uop_br_mask_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_br_tag_0; // @[util.scala:458:7]
wire [3:0] io_deq_bits_uop_br_type_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_sfb_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_fence_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_fencei_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_sfence_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_amo_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_eret_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_rocc_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_mov_0; // @[util.scala:458:7]
wire [3:0] io_deq_bits_uop_ftq_idx_0; // @[util.scala:458:7]
wire io_deq_bits_uop_edge_inst_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_pc_lob_0; // @[util.scala:458:7]
wire io_deq_bits_uop_taken_0; // @[util.scala:458:7]
wire io_deq_bits_uop_imm_rename_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_imm_sel_0; // @[util.scala:458:7]
wire [4:0] io_deq_bits_uop_pimm_0; // @[util.scala:458:7]
wire [19:0] io_deq_bits_uop_imm_packed_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_op1_sel_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_op2_sel_0; // @[util.scala:458:7]
wire [4:0] io_deq_bits_uop_rob_idx_0; // @[util.scala:458:7]
wire [3:0] io_deq_bits_uop_ldq_idx_0; // @[util.scala:458:7]
wire [3:0] io_deq_bits_uop_stq_idx_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_rxq_idx_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_pdst_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_prs1_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_prs2_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_prs3_0; // @[util.scala:458:7]
wire [3:0] io_deq_bits_uop_ppred_0; // @[util.scala:458:7]
wire io_deq_bits_uop_prs1_busy_0; // @[util.scala:458:7]
wire io_deq_bits_uop_prs2_busy_0; // @[util.scala:458:7]
wire io_deq_bits_uop_prs3_busy_0; // @[util.scala:458:7]
wire io_deq_bits_uop_ppred_busy_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_stale_pdst_0; // @[util.scala:458:7]
wire io_deq_bits_uop_exception_0; // @[util.scala:458:7]
wire [63:0] io_deq_bits_uop_exc_cause_0; // @[util.scala:458:7]
wire [4:0] io_deq_bits_uop_mem_cmd_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_mem_size_0; // @[util.scala:458:7]
wire io_deq_bits_uop_mem_signed_0; // @[util.scala:458:7]
wire io_deq_bits_uop_uses_ldq_0; // @[util.scala:458:7]
wire io_deq_bits_uop_uses_stq_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_unique_0; // @[util.scala:458:7]
wire io_deq_bits_uop_flush_on_commit_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_csr_cmd_0; // @[util.scala:458:7]
wire io_deq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_ldst_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_lrs1_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_lrs2_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_lrs3_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_dst_rtype_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7]
wire io_deq_bits_uop_frs3_en_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fcn_dw_0; // @[util.scala:458:7]
wire [4:0] io_deq_bits_uop_fcn_op_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_val_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_fp_rm_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_fp_typ_0; // @[util.scala:458:7]
wire io_deq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7]
wire io_deq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7]
wire io_deq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7]
wire io_deq_bits_uop_bp_debug_if_0; // @[util.scala:458:7]
wire io_deq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_debug_fsrc_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_debug_tsrc_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_old_meta_coh_state_0; // @[util.scala:458:7]
wire [21:0] io_deq_bits_old_meta_tag_0; // @[util.scala:458:7]
wire [33:0] io_deq_bits_addr_0; // @[util.scala:458:7]
wire [63:0] io_deq_bits_data_0; // @[util.scala:458:7]
wire io_deq_bits_is_hella_0; // @[util.scala:458:7]
wire io_deq_bits_tag_match_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_way_en_0; // @[util.scala:458:7]
wire [4:0] io_deq_bits_sdq_id_0; // @[util.scala:458:7]
wire io_deq_valid_0; // @[util.scala:458:7]
wire io_empty_0; // @[util.scala:458:7]
wire [3:0] io_count_0; // @[util.scala:458:7]
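  // Unpack the packed RAM entry: addr [33:0], data [97:34], is_hella [98], tag_match [99],
  // old_meta_coh_state [101:100], old_meta_tag [123:102], way_en [125:124], sdq_id [130:126].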
assign out_addr = _ram_ext_R0_data[33:0]; // @[util.scala:503:22, :545:19]
assign out_data = _ram_ext_R0_data[97:34]; // @[util.scala:503:22, :545:19]
assign out_is_hella = _ram_ext_R0_data[98]; // @[util.scala:503:22, :545:19]
assign out_tag_match = _ram_ext_R0_data[99]; // @[util.scala:503:22, :545:19]
assign out_old_meta_coh_state = _ram_ext_R0_data[101:100]; // @[util.scala:503:22, :545:19]
assign out_old_meta_tag = _ram_ext_R0_data[123:102]; // @[util.scala:503:22, :545:19]
assign out_way_en = _ram_ext_R0_data[125:124]; // @[util.scala:503:22, :545:19]
assign out_sdq_id = _ram_ext_R0_data[130:126]; // @[util.scala:503:22, :545:19]
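  // One valid bit per queue entry (15 entries); the _T_4 aliases are the branch-kill-qualified
  // valids, which reduce to the raw bits since the kill terms are constant here.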
reg valids_0; // @[util.scala:504:26]
wire _valids_0_T_4 = valids_0; // @[util.scala:504:26, :520:31]
reg valids_1; // @[util.scala:504:26]
wire _valids_1_T_4 = valids_1; // @[util.scala:504:26, :520:31]
reg valids_2; // @[util.scala:504:26]
wire _valids_2_T_4 = valids_2; // @[util.scala:504:26, :520:31]
reg valids_3; // @[util.scala:504:26]
wire _valids_3_T_4 = valids_3; // @[util.scala:504:26, :520:31]
reg valids_4; // @[util.scala:504:26]
wire _valids_4_T_4 = valids_4; // @[util.scala:504:26, :520:31]
reg valids_5; // @[util.scala:504:26]
wire _valids_5_T_4 = valids_5; // @[util.scala:504:26, :520:31]
reg valids_6; // @[util.scala:504:26]
wire _valids_6_T_4 = valids_6; // @[util.scala:504:26, :520:31]
reg valids_7; // @[util.scala:504:26]
wire _valids_7_T_4 = valids_7; // @[util.scala:504:26, :520:31]
reg valids_8; // @[util.scala:504:26]
wire _valids_8_T_4 = valids_8; // @[util.scala:504:26, :520:31]
reg valids_9; // @[util.scala:504:26]
wire _valids_9_T_4 = valids_9; // @[util.scala:504:26, :520:31]
reg valids_10; // @[util.scala:504:26]
wire _valids_10_T_4 = valids_10; // @[util.scala:504:26, :520:31]
reg valids_11; // @[util.scala:504:26]
wire _valids_11_T_4 = valids_11; // @[util.scala:504:26, :520:31]
reg valids_12; // @[util.scala:504:26]
wire _valids_12_T_4 = valids_12; // @[util.scala:504:26, :520:31]
reg valids_13; // @[util.scala:504:26]
wire _valids_13_T_4 = valids_13; // @[util.scala:504:26, :520:31]
reg valids_14; // @[util.scala:504:26]
wire _valids_14_T_4 = valids_14; // @[util.scala:504:26, :520:31]
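  // Per-entry micro-op metadata is held in registers (one full copy of the uop fields per entry).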
reg [31:0] uops_0_inst; // @[util.scala:505:22]
reg [31:0] uops_0_debug_inst; // @[util.scala:505:22]
reg uops_0_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_0_debug_pc; // @[util.scala:505:22]
reg uops_0_iq_type_0; // @[util.scala:505:22]
reg uops_0_iq_type_1; // @[util.scala:505:22]
reg uops_0_iq_type_2; // @[util.scala:505:22]
reg uops_0_iq_type_3; // @[util.scala:505:22]
reg uops_0_fu_code_0; // @[util.scala:505:22]
reg uops_0_fu_code_1; // @[util.scala:505:22]
reg uops_0_fu_code_2; // @[util.scala:505:22]
reg uops_0_fu_code_3; // @[util.scala:505:22]
reg uops_0_fu_code_4; // @[util.scala:505:22]
reg uops_0_fu_code_5; // @[util.scala:505:22]
reg uops_0_fu_code_6; // @[util.scala:505:22]
reg uops_0_fu_code_7; // @[util.scala:505:22]
reg uops_0_fu_code_8; // @[util.scala:505:22]
reg uops_0_fu_code_9; // @[util.scala:505:22]
reg uops_0_iw_issued; // @[util.scala:505:22]
reg uops_0_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_0_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_0_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_0_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_0_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_0_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_0_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_0_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_0_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_0_br_mask_T_1 = uops_0_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_0_br_tag; // @[util.scala:505:22]
reg [3:0] uops_0_br_type; // @[util.scala:505:22]
reg uops_0_is_sfb; // @[util.scala:505:22]
reg uops_0_is_fence; // @[util.scala:505:22]
reg uops_0_is_fencei; // @[util.scala:505:22]
reg uops_0_is_sfence; // @[util.scala:505:22]
reg uops_0_is_amo; // @[util.scala:505:22]
reg uops_0_is_eret; // @[util.scala:505:22]
reg uops_0_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_0_is_rocc; // @[util.scala:505:22]
reg uops_0_is_mov; // @[util.scala:505:22]
reg [3:0] uops_0_ftq_idx; // @[util.scala:505:22]
reg uops_0_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_0_pc_lob; // @[util.scala:505:22]
reg uops_0_taken; // @[util.scala:505:22]
reg uops_0_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_0_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_0_pimm; // @[util.scala:505:22]
reg [19:0] uops_0_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_0_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_0_op2_sel; // @[util.scala:505:22]
reg uops_0_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_0_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_0_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_0_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_0_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_0_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_0_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_0_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_0_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_0_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_0_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_0_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_0_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_0_fp_ctrl_div; // @[util.scala:505:22]
reg uops_0_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_0_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_0_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_0_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_0_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_0_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_0_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_0_pdst; // @[util.scala:505:22]
reg [5:0] uops_0_prs1; // @[util.scala:505:22]
reg [5:0] uops_0_prs2; // @[util.scala:505:22]
reg [5:0] uops_0_prs3; // @[util.scala:505:22]
reg [3:0] uops_0_ppred; // @[util.scala:505:22]
reg uops_0_prs1_busy; // @[util.scala:505:22]
reg uops_0_prs2_busy; // @[util.scala:505:22]
reg uops_0_prs3_busy; // @[util.scala:505:22]
reg uops_0_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_0_stale_pdst; // @[util.scala:505:22]
reg uops_0_exception; // @[util.scala:505:22]
reg [63:0] uops_0_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_0_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_0_mem_size; // @[util.scala:505:22]
reg uops_0_mem_signed; // @[util.scala:505:22]
reg uops_0_uses_ldq; // @[util.scala:505:22]
reg uops_0_uses_stq; // @[util.scala:505:22]
reg uops_0_is_unique; // @[util.scala:505:22]
reg uops_0_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_0_csr_cmd; // @[util.scala:505:22]
reg uops_0_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_0_ldst; // @[util.scala:505:22]
reg [5:0] uops_0_lrs1; // @[util.scala:505:22]
reg [5:0] uops_0_lrs2; // @[util.scala:505:22]
reg [5:0] uops_0_lrs3; // @[util.scala:505:22]
reg [1:0] uops_0_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_0_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_0_lrs2_rtype; // @[util.scala:505:22]
reg uops_0_frs3_en; // @[util.scala:505:22]
reg uops_0_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_0_fcn_op; // @[util.scala:505:22]
reg uops_0_fp_val; // @[util.scala:505:22]
reg [2:0] uops_0_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_0_fp_typ; // @[util.scala:505:22]
reg uops_0_xcpt_pf_if; // @[util.scala:505:22]
reg uops_0_xcpt_ae_if; // @[util.scala:505:22]
reg uops_0_xcpt_ma_if; // @[util.scala:505:22]
reg uops_0_bp_debug_if; // @[util.scala:505:22]
reg uops_0_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_0_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_0_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_1_inst; // @[util.scala:505:22]
reg [31:0] uops_1_debug_inst; // @[util.scala:505:22]
reg uops_1_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_1_debug_pc; // @[util.scala:505:22]
reg uops_1_iq_type_0; // @[util.scala:505:22]
reg uops_1_iq_type_1; // @[util.scala:505:22]
reg uops_1_iq_type_2; // @[util.scala:505:22]
reg uops_1_iq_type_3; // @[util.scala:505:22]
reg uops_1_fu_code_0; // @[util.scala:505:22]
reg uops_1_fu_code_1; // @[util.scala:505:22]
reg uops_1_fu_code_2; // @[util.scala:505:22]
reg uops_1_fu_code_3; // @[util.scala:505:22]
reg uops_1_fu_code_4; // @[util.scala:505:22]
reg uops_1_fu_code_5; // @[util.scala:505:22]
reg uops_1_fu_code_6; // @[util.scala:505:22]
reg uops_1_fu_code_7; // @[util.scala:505:22]
reg uops_1_fu_code_8; // @[util.scala:505:22]
reg uops_1_fu_code_9; // @[util.scala:505:22]
reg uops_1_iw_issued; // @[util.scala:505:22]
reg uops_1_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_1_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_1_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_1_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_1_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_1_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_1_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_1_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_1_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_1_br_mask_T_1 = uops_1_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_1_br_tag; // @[util.scala:505:22]
reg [3:0] uops_1_br_type; // @[util.scala:505:22]
reg uops_1_is_sfb; // @[util.scala:505:22]
reg uops_1_is_fence; // @[util.scala:505:22]
reg uops_1_is_fencei; // @[util.scala:505:22]
reg uops_1_is_sfence; // @[util.scala:505:22]
reg uops_1_is_amo; // @[util.scala:505:22]
reg uops_1_is_eret; // @[util.scala:505:22]
reg uops_1_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_1_is_rocc; // @[util.scala:505:22]
reg uops_1_is_mov; // @[util.scala:505:22]
reg [3:0] uops_1_ftq_idx; // @[util.scala:505:22]
reg uops_1_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_1_pc_lob; // @[util.scala:505:22]
reg uops_1_taken; // @[util.scala:505:22]
reg uops_1_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_1_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_1_pimm; // @[util.scala:505:22]
reg [19:0] uops_1_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_1_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_1_op2_sel; // @[util.scala:505:22]
reg uops_1_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_1_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_1_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_1_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_1_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_1_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_1_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_1_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_1_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_1_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_1_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_1_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_1_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_1_fp_ctrl_div; // @[util.scala:505:22]
reg uops_1_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_1_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_1_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_1_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_1_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_1_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_1_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_1_pdst; // @[util.scala:505:22]
reg [5:0] uops_1_prs1; // @[util.scala:505:22]
reg [5:0] uops_1_prs2; // @[util.scala:505:22]
reg [5:0] uops_1_prs3; // @[util.scala:505:22]
reg [3:0] uops_1_ppred; // @[util.scala:505:22]
reg uops_1_prs1_busy; // @[util.scala:505:22]
reg uops_1_prs2_busy; // @[util.scala:505:22]
reg uops_1_prs3_busy; // @[util.scala:505:22]
reg uops_1_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_1_stale_pdst; // @[util.scala:505:22]
reg uops_1_exception; // @[util.scala:505:22]
reg [63:0] uops_1_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_1_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_1_mem_size; // @[util.scala:505:22]
reg uops_1_mem_signed; // @[util.scala:505:22]
reg uops_1_uses_ldq; // @[util.scala:505:22]
reg uops_1_uses_stq; // @[util.scala:505:22]
reg uops_1_is_unique; // @[util.scala:505:22]
reg uops_1_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_1_csr_cmd; // @[util.scala:505:22]
reg uops_1_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_1_ldst; // @[util.scala:505:22]
reg [5:0] uops_1_lrs1; // @[util.scala:505:22]
reg [5:0] uops_1_lrs2; // @[util.scala:505:22]
reg [5:0] uops_1_lrs3; // @[util.scala:505:22]
reg [1:0] uops_1_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_1_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_1_lrs2_rtype; // @[util.scala:505:22]
reg uops_1_frs3_en; // @[util.scala:505:22]
reg uops_1_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_1_fcn_op; // @[util.scala:505:22]
reg uops_1_fp_val; // @[util.scala:505:22]
reg [2:0] uops_1_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_1_fp_typ; // @[util.scala:505:22]
reg uops_1_xcpt_pf_if; // @[util.scala:505:22]
reg uops_1_xcpt_ae_if; // @[util.scala:505:22]
reg uops_1_xcpt_ma_if; // @[util.scala:505:22]
reg uops_1_bp_debug_if; // @[util.scala:505:22]
reg uops_1_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_1_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_1_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_2_inst; // @[util.scala:505:22]
reg [31:0] uops_2_debug_inst; // @[util.scala:505:22]
reg uops_2_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_2_debug_pc; // @[util.scala:505:22]
reg uops_2_iq_type_0; // @[util.scala:505:22]
reg uops_2_iq_type_1; // @[util.scala:505:22]
reg uops_2_iq_type_2; // @[util.scala:505:22]
reg uops_2_iq_type_3; // @[util.scala:505:22]
reg uops_2_fu_code_0; // @[util.scala:505:22]
reg uops_2_fu_code_1; // @[util.scala:505:22]
reg uops_2_fu_code_2; // @[util.scala:505:22]
reg uops_2_fu_code_3; // @[util.scala:505:22]
reg uops_2_fu_code_4; // @[util.scala:505:22]
reg uops_2_fu_code_5; // @[util.scala:505:22]
reg uops_2_fu_code_6; // @[util.scala:505:22]
reg uops_2_fu_code_7; // @[util.scala:505:22]
reg uops_2_fu_code_8; // @[util.scala:505:22]
reg uops_2_fu_code_9; // @[util.scala:505:22]
reg uops_2_iw_issued; // @[util.scala:505:22]
reg uops_2_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_2_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_2_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_2_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_2_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_2_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_2_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_2_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_2_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_2_br_mask_T_1 = uops_2_br_mask; // @[util.scala:97:21, :505:22]
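  // NOTE (editorial annotation): wires of the form _uops_N_br_mask_T_1 simply alias
  // the corresponding stored br_mask register; judging by the util.scala:97 locator
  // they appear to feed the branch-resolution masking logic that clears resolved
  // branch bits from an entry's mask. This is an inference, not asserted by the source.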
reg [1:0] uops_2_br_tag; // @[util.scala:505:22]
reg [3:0] uops_2_br_type; // @[util.scala:505:22]
reg uops_2_is_sfb; // @[util.scala:505:22]
reg uops_2_is_fence; // @[util.scala:505:22]
reg uops_2_is_fencei; // @[util.scala:505:22]
reg uops_2_is_sfence; // @[util.scala:505:22]
reg uops_2_is_amo; // @[util.scala:505:22]
reg uops_2_is_eret; // @[util.scala:505:22]
reg uops_2_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_2_is_rocc; // @[util.scala:505:22]
reg uops_2_is_mov; // @[util.scala:505:22]
reg [3:0] uops_2_ftq_idx; // @[util.scala:505:22]
reg uops_2_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_2_pc_lob; // @[util.scala:505:22]
reg uops_2_taken; // @[util.scala:505:22]
reg uops_2_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_2_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_2_pimm; // @[util.scala:505:22]
reg [19:0] uops_2_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_2_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_2_op2_sel; // @[util.scala:505:22]
reg uops_2_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_2_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_2_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_2_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_2_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_2_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_2_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_2_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_2_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_2_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_2_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_2_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_2_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_2_fp_ctrl_div; // @[util.scala:505:22]
reg uops_2_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_2_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_2_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_2_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_2_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_2_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_2_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_2_pdst; // @[util.scala:505:22]
reg [5:0] uops_2_prs1; // @[util.scala:505:22]
reg [5:0] uops_2_prs2; // @[util.scala:505:22]
reg [5:0] uops_2_prs3; // @[util.scala:505:22]
reg [3:0] uops_2_ppred; // @[util.scala:505:22]
reg uops_2_prs1_busy; // @[util.scala:505:22]
reg uops_2_prs2_busy; // @[util.scala:505:22]
reg uops_2_prs3_busy; // @[util.scala:505:22]
reg uops_2_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_2_stale_pdst; // @[util.scala:505:22]
reg uops_2_exception; // @[util.scala:505:22]
reg [63:0] uops_2_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_2_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_2_mem_size; // @[util.scala:505:22]
reg uops_2_mem_signed; // @[util.scala:505:22]
reg uops_2_uses_ldq; // @[util.scala:505:22]
reg uops_2_uses_stq; // @[util.scala:505:22]
reg uops_2_is_unique; // @[util.scala:505:22]
reg uops_2_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_2_csr_cmd; // @[util.scala:505:22]
reg uops_2_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_2_ldst; // @[util.scala:505:22]
reg [5:0] uops_2_lrs1; // @[util.scala:505:22]
reg [5:0] uops_2_lrs2; // @[util.scala:505:22]
reg [5:0] uops_2_lrs3; // @[util.scala:505:22]
reg [1:0] uops_2_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_2_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_2_lrs2_rtype; // @[util.scala:505:22]
reg uops_2_frs3_en; // @[util.scala:505:22]
reg uops_2_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_2_fcn_op; // @[util.scala:505:22]
reg uops_2_fp_val; // @[util.scala:505:22]
reg [2:0] uops_2_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_2_fp_typ; // @[util.scala:505:22]
reg uops_2_xcpt_pf_if; // @[util.scala:505:22]
reg uops_2_xcpt_ae_if; // @[util.scala:505:22]
reg uops_2_xcpt_ma_if; // @[util.scala:505:22]
reg uops_2_bp_debug_if; // @[util.scala:505:22]
reg uops_2_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_2_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_2_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_3_inst; // @[util.scala:505:22]
reg [31:0] uops_3_debug_inst; // @[util.scala:505:22]
reg uops_3_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_3_debug_pc; // @[util.scala:505:22]
reg uops_3_iq_type_0; // @[util.scala:505:22]
reg uops_3_iq_type_1; // @[util.scala:505:22]
reg uops_3_iq_type_2; // @[util.scala:505:22]
reg uops_3_iq_type_3; // @[util.scala:505:22]
reg uops_3_fu_code_0; // @[util.scala:505:22]
reg uops_3_fu_code_1; // @[util.scala:505:22]
reg uops_3_fu_code_2; // @[util.scala:505:22]
reg uops_3_fu_code_3; // @[util.scala:505:22]
reg uops_3_fu_code_4; // @[util.scala:505:22]
reg uops_3_fu_code_5; // @[util.scala:505:22]
reg uops_3_fu_code_6; // @[util.scala:505:22]
reg uops_3_fu_code_7; // @[util.scala:505:22]
reg uops_3_fu_code_8; // @[util.scala:505:22]
reg uops_3_fu_code_9; // @[util.scala:505:22]
reg uops_3_iw_issued; // @[util.scala:505:22]
reg uops_3_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_3_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_3_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_3_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_3_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_3_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_3_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_3_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_3_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_3_br_mask_T_1 = uops_3_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_3_br_tag; // @[util.scala:505:22]
reg [3:0] uops_3_br_type; // @[util.scala:505:22]
reg uops_3_is_sfb; // @[util.scala:505:22]
reg uops_3_is_fence; // @[util.scala:505:22]
reg uops_3_is_fencei; // @[util.scala:505:22]
reg uops_3_is_sfence; // @[util.scala:505:22]
reg uops_3_is_amo; // @[util.scala:505:22]
reg uops_3_is_eret; // @[util.scala:505:22]
reg uops_3_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_3_is_rocc; // @[util.scala:505:22]
reg uops_3_is_mov; // @[util.scala:505:22]
reg [3:0] uops_3_ftq_idx; // @[util.scala:505:22]
reg uops_3_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_3_pc_lob; // @[util.scala:505:22]
reg uops_3_taken; // @[util.scala:505:22]
reg uops_3_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_3_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_3_pimm; // @[util.scala:505:22]
reg [19:0] uops_3_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_3_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_3_op2_sel; // @[util.scala:505:22]
reg uops_3_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_3_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_3_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_3_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_3_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_3_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_3_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_3_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_3_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_3_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_3_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_3_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_3_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_3_fp_ctrl_div; // @[util.scala:505:22]
reg uops_3_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_3_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_3_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_3_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_3_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_3_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_3_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_3_pdst; // @[util.scala:505:22]
reg [5:0] uops_3_prs1; // @[util.scala:505:22]
reg [5:0] uops_3_prs2; // @[util.scala:505:22]
reg [5:0] uops_3_prs3; // @[util.scala:505:22]
reg [3:0] uops_3_ppred; // @[util.scala:505:22]
reg uops_3_prs1_busy; // @[util.scala:505:22]
reg uops_3_prs2_busy; // @[util.scala:505:22]
reg uops_3_prs3_busy; // @[util.scala:505:22]
reg uops_3_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_3_stale_pdst; // @[util.scala:505:22]
reg uops_3_exception; // @[util.scala:505:22]
reg [63:0] uops_3_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_3_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_3_mem_size; // @[util.scala:505:22]
reg uops_3_mem_signed; // @[util.scala:505:22]
reg uops_3_uses_ldq; // @[util.scala:505:22]
reg uops_3_uses_stq; // @[util.scala:505:22]
reg uops_3_is_unique; // @[util.scala:505:22]
reg uops_3_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_3_csr_cmd; // @[util.scala:505:22]
reg uops_3_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_3_ldst; // @[util.scala:505:22]
reg [5:0] uops_3_lrs1; // @[util.scala:505:22]
reg [5:0] uops_3_lrs2; // @[util.scala:505:22]
reg [5:0] uops_3_lrs3; // @[util.scala:505:22]
reg [1:0] uops_3_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_3_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_3_lrs2_rtype; // @[util.scala:505:22]
reg uops_3_frs3_en; // @[util.scala:505:22]
reg uops_3_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_3_fcn_op; // @[util.scala:505:22]
reg uops_3_fp_val; // @[util.scala:505:22]
reg [2:0] uops_3_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_3_fp_typ; // @[util.scala:505:22]
reg uops_3_xcpt_pf_if; // @[util.scala:505:22]
reg uops_3_xcpt_ae_if; // @[util.scala:505:22]
reg uops_3_xcpt_ma_if; // @[util.scala:505:22]
reg uops_3_bp_debug_if; // @[util.scala:505:22]
reg uops_3_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_3_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_3_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_4_inst; // @[util.scala:505:22]
reg [31:0] uops_4_debug_inst; // @[util.scala:505:22]
reg uops_4_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_4_debug_pc; // @[util.scala:505:22]
reg uops_4_iq_type_0; // @[util.scala:505:22]
reg uops_4_iq_type_1; // @[util.scala:505:22]
reg uops_4_iq_type_2; // @[util.scala:505:22]
reg uops_4_iq_type_3; // @[util.scala:505:22]
reg uops_4_fu_code_0; // @[util.scala:505:22]
reg uops_4_fu_code_1; // @[util.scala:505:22]
reg uops_4_fu_code_2; // @[util.scala:505:22]
reg uops_4_fu_code_3; // @[util.scala:505:22]
reg uops_4_fu_code_4; // @[util.scala:505:22]
reg uops_4_fu_code_5; // @[util.scala:505:22]
reg uops_4_fu_code_6; // @[util.scala:505:22]
reg uops_4_fu_code_7; // @[util.scala:505:22]
reg uops_4_fu_code_8; // @[util.scala:505:22]
reg uops_4_fu_code_9; // @[util.scala:505:22]
reg uops_4_iw_issued; // @[util.scala:505:22]
reg uops_4_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_4_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_4_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_4_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_4_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_4_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_4_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_4_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_4_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_4_br_mask_T_1 = uops_4_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_4_br_tag; // @[util.scala:505:22]
reg [3:0] uops_4_br_type; // @[util.scala:505:22]
reg uops_4_is_sfb; // @[util.scala:505:22]
reg uops_4_is_fence; // @[util.scala:505:22]
reg uops_4_is_fencei; // @[util.scala:505:22]
reg uops_4_is_sfence; // @[util.scala:505:22]
reg uops_4_is_amo; // @[util.scala:505:22]
reg uops_4_is_eret; // @[util.scala:505:22]
reg uops_4_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_4_is_rocc; // @[util.scala:505:22]
reg uops_4_is_mov; // @[util.scala:505:22]
reg [3:0] uops_4_ftq_idx; // @[util.scala:505:22]
reg uops_4_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_4_pc_lob; // @[util.scala:505:22]
reg uops_4_taken; // @[util.scala:505:22]
reg uops_4_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_4_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_4_pimm; // @[util.scala:505:22]
reg [19:0] uops_4_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_4_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_4_op2_sel; // @[util.scala:505:22]
reg uops_4_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_4_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_4_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_4_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_4_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_4_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_4_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_4_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_4_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_4_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_4_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_4_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_4_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_4_fp_ctrl_div; // @[util.scala:505:22]
reg uops_4_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_4_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_4_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_4_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_4_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_4_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_4_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_4_pdst; // @[util.scala:505:22]
reg [5:0] uops_4_prs1; // @[util.scala:505:22]
reg [5:0] uops_4_prs2; // @[util.scala:505:22]
reg [5:0] uops_4_prs3; // @[util.scala:505:22]
reg [3:0] uops_4_ppred; // @[util.scala:505:22]
reg uops_4_prs1_busy; // @[util.scala:505:22]
reg uops_4_prs2_busy; // @[util.scala:505:22]
reg uops_4_prs3_busy; // @[util.scala:505:22]
reg uops_4_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_4_stale_pdst; // @[util.scala:505:22]
reg uops_4_exception; // @[util.scala:505:22]
reg [63:0] uops_4_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_4_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_4_mem_size; // @[util.scala:505:22]
reg uops_4_mem_signed; // @[util.scala:505:22]
reg uops_4_uses_ldq; // @[util.scala:505:22]
reg uops_4_uses_stq; // @[util.scala:505:22]
reg uops_4_is_unique; // @[util.scala:505:22]
reg uops_4_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_4_csr_cmd; // @[util.scala:505:22]
reg uops_4_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_4_ldst; // @[util.scala:505:22]
reg [5:0] uops_4_lrs1; // @[util.scala:505:22]
reg [5:0] uops_4_lrs2; // @[util.scala:505:22]
reg [5:0] uops_4_lrs3; // @[util.scala:505:22]
reg [1:0] uops_4_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_4_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_4_lrs2_rtype; // @[util.scala:505:22]
reg uops_4_frs3_en; // @[util.scala:505:22]
reg uops_4_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_4_fcn_op; // @[util.scala:505:22]
reg uops_4_fp_val; // @[util.scala:505:22]
reg [2:0] uops_4_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_4_fp_typ; // @[util.scala:505:22]
reg uops_4_xcpt_pf_if; // @[util.scala:505:22]
reg uops_4_xcpt_ae_if; // @[util.scala:505:22]
reg uops_4_xcpt_ma_if; // @[util.scala:505:22]
reg uops_4_bp_debug_if; // @[util.scala:505:22]
reg uops_4_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_4_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_4_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_5_inst; // @[util.scala:505:22]
reg [31:0] uops_5_debug_inst; // @[util.scala:505:22]
reg uops_5_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_5_debug_pc; // @[util.scala:505:22]
reg uops_5_iq_type_0; // @[util.scala:505:22]
reg uops_5_iq_type_1; // @[util.scala:505:22]
reg uops_5_iq_type_2; // @[util.scala:505:22]
reg uops_5_iq_type_3; // @[util.scala:505:22]
reg uops_5_fu_code_0; // @[util.scala:505:22]
reg uops_5_fu_code_1; // @[util.scala:505:22]
reg uops_5_fu_code_2; // @[util.scala:505:22]
reg uops_5_fu_code_3; // @[util.scala:505:22]
reg uops_5_fu_code_4; // @[util.scala:505:22]
reg uops_5_fu_code_5; // @[util.scala:505:22]
reg uops_5_fu_code_6; // @[util.scala:505:22]
reg uops_5_fu_code_7; // @[util.scala:505:22]
reg uops_5_fu_code_8; // @[util.scala:505:22]
reg uops_5_fu_code_9; // @[util.scala:505:22]
reg uops_5_iw_issued; // @[util.scala:505:22]
reg uops_5_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_5_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_5_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_5_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_5_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_5_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_5_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_5_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_5_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_5_br_mask_T_1 = uops_5_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_5_br_tag; // @[util.scala:505:22]
reg [3:0] uops_5_br_type; // @[util.scala:505:22]
reg uops_5_is_sfb; // @[util.scala:505:22]
reg uops_5_is_fence; // @[util.scala:505:22]
reg uops_5_is_fencei; // @[util.scala:505:22]
reg uops_5_is_sfence; // @[util.scala:505:22]
reg uops_5_is_amo; // @[util.scala:505:22]
reg uops_5_is_eret; // @[util.scala:505:22]
reg uops_5_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_5_is_rocc; // @[util.scala:505:22]
reg uops_5_is_mov; // @[util.scala:505:22]
reg [3:0] uops_5_ftq_idx; // @[util.scala:505:22]
reg uops_5_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_5_pc_lob; // @[util.scala:505:22]
reg uops_5_taken; // @[util.scala:505:22]
reg uops_5_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_5_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_5_pimm; // @[util.scala:505:22]
reg [19:0] uops_5_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_5_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_5_op2_sel; // @[util.scala:505:22]
reg uops_5_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_5_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_5_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_5_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_5_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_5_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_5_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_5_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_5_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_5_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_5_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_5_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_5_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_5_fp_ctrl_div; // @[util.scala:505:22]
reg uops_5_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_5_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_5_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_5_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_5_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_5_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_5_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_5_pdst; // @[util.scala:505:22]
reg [5:0] uops_5_prs1; // @[util.scala:505:22]
reg [5:0] uops_5_prs2; // @[util.scala:505:22]
reg [5:0] uops_5_prs3; // @[util.scala:505:22]
reg [3:0] uops_5_ppred; // @[util.scala:505:22]
reg uops_5_prs1_busy; // @[util.scala:505:22]
reg uops_5_prs2_busy; // @[util.scala:505:22]
reg uops_5_prs3_busy; // @[util.scala:505:22]
reg uops_5_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_5_stale_pdst; // @[util.scala:505:22]
reg uops_5_exception; // @[util.scala:505:22]
reg [63:0] uops_5_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_5_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_5_mem_size; // @[util.scala:505:22]
reg uops_5_mem_signed; // @[util.scala:505:22]
reg uops_5_uses_ldq; // @[util.scala:505:22]
reg uops_5_uses_stq; // @[util.scala:505:22]
reg uops_5_is_unique; // @[util.scala:505:22]
reg uops_5_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_5_csr_cmd; // @[util.scala:505:22]
reg uops_5_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_5_ldst; // @[util.scala:505:22]
reg [5:0] uops_5_lrs1; // @[util.scala:505:22]
reg [5:0] uops_5_lrs2; // @[util.scala:505:22]
reg [5:0] uops_5_lrs3; // @[util.scala:505:22]
reg [1:0] uops_5_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_5_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_5_lrs2_rtype; // @[util.scala:505:22]
reg uops_5_frs3_en; // @[util.scala:505:22]
reg uops_5_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_5_fcn_op; // @[util.scala:505:22]
reg uops_5_fp_val; // @[util.scala:505:22]
reg [2:0] uops_5_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_5_fp_typ; // @[util.scala:505:22]
reg uops_5_xcpt_pf_if; // @[util.scala:505:22]
reg uops_5_xcpt_ae_if; // @[util.scala:505:22]
reg uops_5_xcpt_ma_if; // @[util.scala:505:22]
reg uops_5_bp_debug_if; // @[util.scala:505:22]
reg uops_5_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_5_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_5_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_6_inst; // @[util.scala:505:22]
reg [31:0] uops_6_debug_inst; // @[util.scala:505:22]
reg uops_6_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_6_debug_pc; // @[util.scala:505:22]
reg uops_6_iq_type_0; // @[util.scala:505:22]
reg uops_6_iq_type_1; // @[util.scala:505:22]
reg uops_6_iq_type_2; // @[util.scala:505:22]
reg uops_6_iq_type_3; // @[util.scala:505:22]
reg uops_6_fu_code_0; // @[util.scala:505:22]
reg uops_6_fu_code_1; // @[util.scala:505:22]
reg uops_6_fu_code_2; // @[util.scala:505:22]
reg uops_6_fu_code_3; // @[util.scala:505:22]
reg uops_6_fu_code_4; // @[util.scala:505:22]
reg uops_6_fu_code_5; // @[util.scala:505:22]
reg uops_6_fu_code_6; // @[util.scala:505:22]
reg uops_6_fu_code_7; // @[util.scala:505:22]
reg uops_6_fu_code_8; // @[util.scala:505:22]
reg uops_6_fu_code_9; // @[util.scala:505:22]
reg uops_6_iw_issued; // @[util.scala:505:22]
reg uops_6_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_6_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_6_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_6_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_6_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_6_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_6_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_6_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_6_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_6_br_mask_T_1 = uops_6_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_6_br_tag; // @[util.scala:505:22]
reg [3:0] uops_6_br_type; // @[util.scala:505:22]
reg uops_6_is_sfb; // @[util.scala:505:22]
reg uops_6_is_fence; // @[util.scala:505:22]
reg uops_6_is_fencei; // @[util.scala:505:22]
reg uops_6_is_sfence; // @[util.scala:505:22]
reg uops_6_is_amo; // @[util.scala:505:22]
reg uops_6_is_eret; // @[util.scala:505:22]
reg uops_6_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_6_is_rocc; // @[util.scala:505:22]
reg uops_6_is_mov; // @[util.scala:505:22]
reg [3:0] uops_6_ftq_idx; // @[util.scala:505:22]
reg uops_6_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_6_pc_lob; // @[util.scala:505:22]
reg uops_6_taken; // @[util.scala:505:22]
reg uops_6_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_6_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_6_pimm; // @[util.scala:505:22]
reg [19:0] uops_6_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_6_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_6_op2_sel; // @[util.scala:505:22]
reg uops_6_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_6_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_6_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_6_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_6_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_6_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_6_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_6_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_6_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_6_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_6_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_6_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_6_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_6_fp_ctrl_div; // @[util.scala:505:22]
reg uops_6_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_6_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_6_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_6_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_6_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_6_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_6_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_6_pdst; // @[util.scala:505:22]
reg [5:0] uops_6_prs1; // @[util.scala:505:22]
reg [5:0] uops_6_prs2; // @[util.scala:505:22]
reg [5:0] uops_6_prs3; // @[util.scala:505:22]
reg [3:0] uops_6_ppred; // @[util.scala:505:22]
reg uops_6_prs1_busy; // @[util.scala:505:22]
reg uops_6_prs2_busy; // @[util.scala:505:22]
reg uops_6_prs3_busy; // @[util.scala:505:22]
reg uops_6_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_6_stale_pdst; // @[util.scala:505:22]
reg uops_6_exception; // @[util.scala:505:22]
reg [63:0] uops_6_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_6_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_6_mem_size; // @[util.scala:505:22]
reg uops_6_mem_signed; // @[util.scala:505:22]
reg uops_6_uses_ldq; // @[util.scala:505:22]
reg uops_6_uses_stq; // @[util.scala:505:22]
reg uops_6_is_unique; // @[util.scala:505:22]
reg uops_6_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_6_csr_cmd; // @[util.scala:505:22]
reg uops_6_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_6_ldst; // @[util.scala:505:22]
reg [5:0] uops_6_lrs1; // @[util.scala:505:22]
reg [5:0] uops_6_lrs2; // @[util.scala:505:22]
reg [5:0] uops_6_lrs3; // @[util.scala:505:22]
reg [1:0] uops_6_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_6_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_6_lrs2_rtype; // @[util.scala:505:22]
reg uops_6_frs3_en; // @[util.scala:505:22]
reg uops_6_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_6_fcn_op; // @[util.scala:505:22]
reg uops_6_fp_val; // @[util.scala:505:22]
reg [2:0] uops_6_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_6_fp_typ; // @[util.scala:505:22]
reg uops_6_xcpt_pf_if; // @[util.scala:505:22]
reg uops_6_xcpt_ae_if; // @[util.scala:505:22]
reg uops_6_xcpt_ma_if; // @[util.scala:505:22]
reg uops_6_bp_debug_if; // @[util.scala:505:22]
reg uops_6_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_6_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_6_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_7_inst; // @[util.scala:505:22]
reg [31:0] uops_7_debug_inst; // @[util.scala:505:22]
reg uops_7_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_7_debug_pc; // @[util.scala:505:22]
reg uops_7_iq_type_0; // @[util.scala:505:22]
reg uops_7_iq_type_1; // @[util.scala:505:22]
reg uops_7_iq_type_2; // @[util.scala:505:22]
reg uops_7_iq_type_3; // @[util.scala:505:22]
reg uops_7_fu_code_0; // @[util.scala:505:22]
reg uops_7_fu_code_1; // @[util.scala:505:22]
reg uops_7_fu_code_2; // @[util.scala:505:22]
reg uops_7_fu_code_3; // @[util.scala:505:22]
reg uops_7_fu_code_4; // @[util.scala:505:22]
reg uops_7_fu_code_5; // @[util.scala:505:22]
reg uops_7_fu_code_6; // @[util.scala:505:22]
reg uops_7_fu_code_7; // @[util.scala:505:22]
reg uops_7_fu_code_8; // @[util.scala:505:22]
reg uops_7_fu_code_9; // @[util.scala:505:22]
reg uops_7_iw_issued; // @[util.scala:505:22]
reg uops_7_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_7_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_7_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_7_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_7_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_7_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_7_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_7_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_7_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_7_br_mask_T_1 = uops_7_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_7_br_tag; // @[util.scala:505:22]
reg [3:0] uops_7_br_type; // @[util.scala:505:22]
reg uops_7_is_sfb; // @[util.scala:505:22]
reg uops_7_is_fence; // @[util.scala:505:22]
reg uops_7_is_fencei; // @[util.scala:505:22]
reg uops_7_is_sfence; // @[util.scala:505:22]
reg uops_7_is_amo; // @[util.scala:505:22]
reg uops_7_is_eret; // @[util.scala:505:22]
reg uops_7_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_7_is_rocc; // @[util.scala:505:22]
reg uops_7_is_mov; // @[util.scala:505:22]
reg [3:0] uops_7_ftq_idx; // @[util.scala:505:22]
reg uops_7_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_7_pc_lob; // @[util.scala:505:22]
reg uops_7_taken; // @[util.scala:505:22]
reg uops_7_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_7_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_7_pimm; // @[util.scala:505:22]
reg [19:0] uops_7_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_7_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_7_op2_sel; // @[util.scala:505:22]
reg uops_7_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_7_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_7_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_7_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_7_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_7_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_7_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_7_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_7_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_7_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_7_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_7_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_7_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_7_fp_ctrl_div; // @[util.scala:505:22]
reg uops_7_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_7_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_7_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_7_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_7_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_7_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_7_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_7_pdst; // @[util.scala:505:22]
reg [5:0] uops_7_prs1; // @[util.scala:505:22]
reg [5:0] uops_7_prs2; // @[util.scala:505:22]
reg [5:0] uops_7_prs3; // @[util.scala:505:22]
reg [3:0] uops_7_ppred; // @[util.scala:505:22]
reg uops_7_prs1_busy; // @[util.scala:505:22]
reg uops_7_prs2_busy; // @[util.scala:505:22]
reg uops_7_prs3_busy; // @[util.scala:505:22]
reg uops_7_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_7_stale_pdst; // @[util.scala:505:22]
reg uops_7_exception; // @[util.scala:505:22]
reg [63:0] uops_7_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_7_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_7_mem_size; // @[util.scala:505:22]
reg uops_7_mem_signed; // @[util.scala:505:22]
reg uops_7_uses_ldq; // @[util.scala:505:22]
reg uops_7_uses_stq; // @[util.scala:505:22]
reg uops_7_is_unique; // @[util.scala:505:22]
reg uops_7_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_7_csr_cmd; // @[util.scala:505:22]
reg uops_7_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_7_ldst; // @[util.scala:505:22]
reg [5:0] uops_7_lrs1; // @[util.scala:505:22]
reg [5:0] uops_7_lrs2; // @[util.scala:505:22]
reg [5:0] uops_7_lrs3; // @[util.scala:505:22]
reg [1:0] uops_7_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_7_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_7_lrs2_rtype; // @[util.scala:505:22]
reg uops_7_frs3_en; // @[util.scala:505:22]
reg uops_7_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_7_fcn_op; // @[util.scala:505:22]
reg uops_7_fp_val; // @[util.scala:505:22]
reg [2:0] uops_7_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_7_fp_typ; // @[util.scala:505:22]
reg uops_7_xcpt_pf_if; // @[util.scala:505:22]
reg uops_7_xcpt_ae_if; // @[util.scala:505:22]
reg uops_7_xcpt_ma_if; // @[util.scala:505:22]
reg uops_7_bp_debug_if; // @[util.scala:505:22]
reg uops_7_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_7_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_7_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_8_inst; // @[util.scala:505:22]
reg [31:0] uops_8_debug_inst; // @[util.scala:505:22]
reg uops_8_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_8_debug_pc; // @[util.scala:505:22]
reg uops_8_iq_type_0; // @[util.scala:505:22]
reg uops_8_iq_type_1; // @[util.scala:505:22]
reg uops_8_iq_type_2; // @[util.scala:505:22]
reg uops_8_iq_type_3; // @[util.scala:505:22]
reg uops_8_fu_code_0; // @[util.scala:505:22]
reg uops_8_fu_code_1; // @[util.scala:505:22]
reg uops_8_fu_code_2; // @[util.scala:505:22]
reg uops_8_fu_code_3; // @[util.scala:505:22]
reg uops_8_fu_code_4; // @[util.scala:505:22]
reg uops_8_fu_code_5; // @[util.scala:505:22]
reg uops_8_fu_code_6; // @[util.scala:505:22]
reg uops_8_fu_code_7; // @[util.scala:505:22]
reg uops_8_fu_code_8; // @[util.scala:505:22]
reg uops_8_fu_code_9; // @[util.scala:505:22]
reg uops_8_iw_issued; // @[util.scala:505:22]
reg uops_8_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_8_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_8_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_8_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_8_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_8_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_8_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_8_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_8_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_8_br_mask_T_1 = uops_8_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_8_br_tag; // @[util.scala:505:22]
reg [3:0] uops_8_br_type; // @[util.scala:505:22]
reg uops_8_is_sfb; // @[util.scala:505:22]
reg uops_8_is_fence; // @[util.scala:505:22]
reg uops_8_is_fencei; // @[util.scala:505:22]
reg uops_8_is_sfence; // @[util.scala:505:22]
reg uops_8_is_amo; // @[util.scala:505:22]
reg uops_8_is_eret; // @[util.scala:505:22]
reg uops_8_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_8_is_rocc; // @[util.scala:505:22]
reg uops_8_is_mov; // @[util.scala:505:22]
reg [3:0] uops_8_ftq_idx; // @[util.scala:505:22]
reg uops_8_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_8_pc_lob; // @[util.scala:505:22]
reg uops_8_taken; // @[util.scala:505:22]
reg uops_8_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_8_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_8_pimm; // @[util.scala:505:22]
reg [19:0] uops_8_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_8_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_8_op2_sel; // @[util.scala:505:22]
reg uops_8_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_8_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_8_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_8_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_8_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_8_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_8_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_8_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_8_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_8_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_8_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_8_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_8_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_8_fp_ctrl_div; // @[util.scala:505:22]
reg uops_8_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_8_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_8_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_8_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_8_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_8_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_8_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_8_pdst; // @[util.scala:505:22]
reg [5:0] uops_8_prs1; // @[util.scala:505:22]
reg [5:0] uops_8_prs2; // @[util.scala:505:22]
reg [5:0] uops_8_prs3; // @[util.scala:505:22]
reg [3:0] uops_8_ppred; // @[util.scala:505:22]
reg uops_8_prs1_busy; // @[util.scala:505:22]
reg uops_8_prs2_busy; // @[util.scala:505:22]
reg uops_8_prs3_busy; // @[util.scala:505:22]
reg uops_8_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_8_stale_pdst; // @[util.scala:505:22]
reg uops_8_exception; // @[util.scala:505:22]
reg [63:0] uops_8_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_8_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_8_mem_size; // @[util.scala:505:22]
reg uops_8_mem_signed; // @[util.scala:505:22]
reg uops_8_uses_ldq; // @[util.scala:505:22]
reg uops_8_uses_stq; // @[util.scala:505:22]
reg uops_8_is_unique; // @[util.scala:505:22]
reg uops_8_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_8_csr_cmd; // @[util.scala:505:22]
reg uops_8_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_8_ldst; // @[util.scala:505:22]
reg [5:0] uops_8_lrs1; // @[util.scala:505:22]
reg [5:0] uops_8_lrs2; // @[util.scala:505:22]
reg [5:0] uops_8_lrs3; // @[util.scala:505:22]
reg [1:0] uops_8_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_8_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_8_lrs2_rtype; // @[util.scala:505:22]
reg uops_8_frs3_en; // @[util.scala:505:22]
reg uops_8_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_8_fcn_op; // @[util.scala:505:22]
reg uops_8_fp_val; // @[util.scala:505:22]
reg [2:0] uops_8_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_8_fp_typ; // @[util.scala:505:22]
reg uops_8_xcpt_pf_if; // @[util.scala:505:22]
reg uops_8_xcpt_ae_if; // @[util.scala:505:22]
reg uops_8_xcpt_ma_if; // @[util.scala:505:22]
reg uops_8_bp_debug_if; // @[util.scala:505:22]
reg uops_8_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_8_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_8_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_9_inst; // @[util.scala:505:22]
reg [31:0] uops_9_debug_inst; // @[util.scala:505:22]
reg uops_9_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_9_debug_pc; // @[util.scala:505:22]
reg uops_9_iq_type_0; // @[util.scala:505:22]
reg uops_9_iq_type_1; // @[util.scala:505:22]
reg uops_9_iq_type_2; // @[util.scala:505:22]
reg uops_9_iq_type_3; // @[util.scala:505:22]
reg uops_9_fu_code_0; // @[util.scala:505:22]
reg uops_9_fu_code_1; // @[util.scala:505:22]
reg uops_9_fu_code_2; // @[util.scala:505:22]
reg uops_9_fu_code_3; // @[util.scala:505:22]
reg uops_9_fu_code_4; // @[util.scala:505:22]
reg uops_9_fu_code_5; // @[util.scala:505:22]
reg uops_9_fu_code_6; // @[util.scala:505:22]
reg uops_9_fu_code_7; // @[util.scala:505:22]
reg uops_9_fu_code_8; // @[util.scala:505:22]
reg uops_9_fu_code_9; // @[util.scala:505:22]
reg uops_9_iw_issued; // @[util.scala:505:22]
reg uops_9_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_9_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_9_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_9_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_9_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_9_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_9_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_9_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_9_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_9_br_mask_T_1 = uops_9_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_9_br_tag; // @[util.scala:505:22]
reg [3:0] uops_9_br_type; // @[util.scala:505:22]
reg uops_9_is_sfb; // @[util.scala:505:22]
reg uops_9_is_fence; // @[util.scala:505:22]
reg uops_9_is_fencei; // @[util.scala:505:22]
reg uops_9_is_sfence; // @[util.scala:505:22]
reg uops_9_is_amo; // @[util.scala:505:22]
reg uops_9_is_eret; // @[util.scala:505:22]
reg uops_9_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_9_is_rocc; // @[util.scala:505:22]
reg uops_9_is_mov; // @[util.scala:505:22]
reg [3:0] uops_9_ftq_idx; // @[util.scala:505:22]
reg uops_9_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_9_pc_lob; // @[util.scala:505:22]
reg uops_9_taken; // @[util.scala:505:22]
reg uops_9_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_9_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_9_pimm; // @[util.scala:505:22]
reg [19:0] uops_9_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_9_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_9_op2_sel; // @[util.scala:505:22]
reg uops_9_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_9_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_9_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_9_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_9_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_9_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_9_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_9_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_9_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_9_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_9_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_9_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_9_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_9_fp_ctrl_div; // @[util.scala:505:22]
reg uops_9_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_9_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_9_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_9_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_9_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_9_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_9_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_9_pdst; // @[util.scala:505:22]
reg [5:0] uops_9_prs1; // @[util.scala:505:22]
reg [5:0] uops_9_prs2; // @[util.scala:505:22]
reg [5:0] uops_9_prs3; // @[util.scala:505:22]
reg [3:0] uops_9_ppred; // @[util.scala:505:22]
reg uops_9_prs1_busy; // @[util.scala:505:22]
reg uops_9_prs2_busy; // @[util.scala:505:22]
reg uops_9_prs3_busy; // @[util.scala:505:22]
reg uops_9_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_9_stale_pdst; // @[util.scala:505:22]
reg uops_9_exception; // @[util.scala:505:22]
reg [63:0] uops_9_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_9_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_9_mem_size; // @[util.scala:505:22]
reg uops_9_mem_signed; // @[util.scala:505:22]
reg uops_9_uses_ldq; // @[util.scala:505:22]
reg uops_9_uses_stq; // @[util.scala:505:22]
reg uops_9_is_unique; // @[util.scala:505:22]
reg uops_9_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_9_csr_cmd; // @[util.scala:505:22]
reg uops_9_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_9_ldst; // @[util.scala:505:22]
reg [5:0] uops_9_lrs1; // @[util.scala:505:22]
reg [5:0] uops_9_lrs2; // @[util.scala:505:22]
reg [5:0] uops_9_lrs3; // @[util.scala:505:22]
reg [1:0] uops_9_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_9_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_9_lrs2_rtype; // @[util.scala:505:22]
reg uops_9_frs3_en; // @[util.scala:505:22]
reg uops_9_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_9_fcn_op; // @[util.scala:505:22]
reg uops_9_fp_val; // @[util.scala:505:22]
reg [2:0] uops_9_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_9_fp_typ; // @[util.scala:505:22]
reg uops_9_xcpt_pf_if; // @[util.scala:505:22]
reg uops_9_xcpt_ae_if; // @[util.scala:505:22]
reg uops_9_xcpt_ma_if; // @[util.scala:505:22]
reg uops_9_bp_debug_if; // @[util.scala:505:22]
reg uops_9_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_9_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_9_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_10_inst; // @[util.scala:505:22]
reg [31:0] uops_10_debug_inst; // @[util.scala:505:22]
reg uops_10_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_10_debug_pc; // @[util.scala:505:22]
reg uops_10_iq_type_0; // @[util.scala:505:22]
reg uops_10_iq_type_1; // @[util.scala:505:22]
reg uops_10_iq_type_2; // @[util.scala:505:22]
reg uops_10_iq_type_3; // @[util.scala:505:22]
reg uops_10_fu_code_0; // @[util.scala:505:22]
reg uops_10_fu_code_1; // @[util.scala:505:22]
reg uops_10_fu_code_2; // @[util.scala:505:22]
reg uops_10_fu_code_3; // @[util.scala:505:22]
reg uops_10_fu_code_4; // @[util.scala:505:22]
reg uops_10_fu_code_5; // @[util.scala:505:22]
reg uops_10_fu_code_6; // @[util.scala:505:22]
reg uops_10_fu_code_7; // @[util.scala:505:22]
reg uops_10_fu_code_8; // @[util.scala:505:22]
reg uops_10_fu_code_9; // @[util.scala:505:22]
reg uops_10_iw_issued; // @[util.scala:505:22]
reg uops_10_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_10_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_10_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_10_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_10_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_10_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_10_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_10_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_10_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_10_br_mask_T_1 = uops_10_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_10_br_tag; // @[util.scala:505:22]
reg [3:0] uops_10_br_type; // @[util.scala:505:22]
reg uops_10_is_sfb; // @[util.scala:505:22]
reg uops_10_is_fence; // @[util.scala:505:22]
reg uops_10_is_fencei; // @[util.scala:505:22]
reg uops_10_is_sfence; // @[util.scala:505:22]
reg uops_10_is_amo; // @[util.scala:505:22]
reg uops_10_is_eret; // @[util.scala:505:22]
reg uops_10_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_10_is_rocc; // @[util.scala:505:22]
reg uops_10_is_mov; // @[util.scala:505:22]
reg [3:0] uops_10_ftq_idx; // @[util.scala:505:22]
reg uops_10_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_10_pc_lob; // @[util.scala:505:22]
reg uops_10_taken; // @[util.scala:505:22]
reg uops_10_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_10_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_10_pimm; // @[util.scala:505:22]
reg [19:0] uops_10_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_10_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_10_op2_sel; // @[util.scala:505:22]
reg uops_10_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_10_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_10_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_10_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_10_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_10_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_10_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_10_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_10_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_10_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_10_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_10_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_10_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_10_fp_ctrl_div; // @[util.scala:505:22]
reg uops_10_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_10_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_10_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_10_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_10_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_10_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_10_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_10_pdst; // @[util.scala:505:22]
reg [5:0] uops_10_prs1; // @[util.scala:505:22]
reg [5:0] uops_10_prs2; // @[util.scala:505:22]
reg [5:0] uops_10_prs3; // @[util.scala:505:22]
reg [3:0] uops_10_ppred; // @[util.scala:505:22]
reg uops_10_prs1_busy; // @[util.scala:505:22]
reg uops_10_prs2_busy; // @[util.scala:505:22]
reg uops_10_prs3_busy; // @[util.scala:505:22]
reg uops_10_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_10_stale_pdst; // @[util.scala:505:22]
reg uops_10_exception; // @[util.scala:505:22]
reg [63:0] uops_10_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_10_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_10_mem_size; // @[util.scala:505:22]
reg uops_10_mem_signed; // @[util.scala:505:22]
reg uops_10_uses_ldq; // @[util.scala:505:22]
reg uops_10_uses_stq; // @[util.scala:505:22]
reg uops_10_is_unique; // @[util.scala:505:22]
reg uops_10_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_10_csr_cmd; // @[util.scala:505:22]
reg uops_10_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_10_ldst; // @[util.scala:505:22]
reg [5:0] uops_10_lrs1; // @[util.scala:505:22]
reg [5:0] uops_10_lrs2; // @[util.scala:505:22]
reg [5:0] uops_10_lrs3; // @[util.scala:505:22]
reg [1:0] uops_10_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_10_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_10_lrs2_rtype; // @[util.scala:505:22]
reg uops_10_frs3_en; // @[util.scala:505:22]
reg uops_10_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_10_fcn_op; // @[util.scala:505:22]
reg uops_10_fp_val; // @[util.scala:505:22]
reg [2:0] uops_10_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_10_fp_typ; // @[util.scala:505:22]
reg uops_10_xcpt_pf_if; // @[util.scala:505:22]
reg uops_10_xcpt_ae_if; // @[util.scala:505:22]
reg uops_10_xcpt_ma_if; // @[util.scala:505:22]
reg uops_10_bp_debug_if; // @[util.scala:505:22]
reg uops_10_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_10_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_10_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_11_inst; // @[util.scala:505:22]
reg [31:0] uops_11_debug_inst; // @[util.scala:505:22]
reg uops_11_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_11_debug_pc; // @[util.scala:505:22]
reg uops_11_iq_type_0; // @[util.scala:505:22]
reg uops_11_iq_type_1; // @[util.scala:505:22]
reg uops_11_iq_type_2; // @[util.scala:505:22]
reg uops_11_iq_type_3; // @[util.scala:505:22]
reg uops_11_fu_code_0; // @[util.scala:505:22]
reg uops_11_fu_code_1; // @[util.scala:505:22]
reg uops_11_fu_code_2; // @[util.scala:505:22]
reg uops_11_fu_code_3; // @[util.scala:505:22]
reg uops_11_fu_code_4; // @[util.scala:505:22]
reg uops_11_fu_code_5; // @[util.scala:505:22]
reg uops_11_fu_code_6; // @[util.scala:505:22]
reg uops_11_fu_code_7; // @[util.scala:505:22]
reg uops_11_fu_code_8; // @[util.scala:505:22]
reg uops_11_fu_code_9; // @[util.scala:505:22]
reg uops_11_iw_issued; // @[util.scala:505:22]
reg uops_11_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_11_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_11_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_11_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_11_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_11_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_11_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_11_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_11_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_11_br_mask_T_1 = uops_11_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_11_br_tag; // @[util.scala:505:22]
reg [3:0] uops_11_br_type; // @[util.scala:505:22]
reg uops_11_is_sfb; // @[util.scala:505:22]
reg uops_11_is_fence; // @[util.scala:505:22]
reg uops_11_is_fencei; // @[util.scala:505:22]
reg uops_11_is_sfence; // @[util.scala:505:22]
reg uops_11_is_amo; // @[util.scala:505:22]
reg uops_11_is_eret; // @[util.scala:505:22]
reg uops_11_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_11_is_rocc; // @[util.scala:505:22]
reg uops_11_is_mov; // @[util.scala:505:22]
reg [3:0] uops_11_ftq_idx; // @[util.scala:505:22]
reg uops_11_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_11_pc_lob; // @[util.scala:505:22]
reg uops_11_taken; // @[util.scala:505:22]
reg uops_11_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_11_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_11_pimm; // @[util.scala:505:22]
reg [19:0] uops_11_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_11_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_11_op2_sel; // @[util.scala:505:22]
reg uops_11_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_11_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_11_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_11_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_11_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_11_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_11_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_11_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_11_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_11_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_11_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_11_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_11_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_11_fp_ctrl_div; // @[util.scala:505:22]
reg uops_11_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_11_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_11_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_11_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_11_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_11_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_11_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_11_pdst; // @[util.scala:505:22]
reg [5:0] uops_11_prs1; // @[util.scala:505:22]
reg [5:0] uops_11_prs2; // @[util.scala:505:22]
reg [5:0] uops_11_prs3; // @[util.scala:505:22]
reg [3:0] uops_11_ppred; // @[util.scala:505:22]
reg uops_11_prs1_busy; // @[util.scala:505:22]
reg uops_11_prs2_busy; // @[util.scala:505:22]
reg uops_11_prs3_busy; // @[util.scala:505:22]
reg uops_11_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_11_stale_pdst; // @[util.scala:505:22]
reg uops_11_exception; // @[util.scala:505:22]
reg [63:0] uops_11_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_11_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_11_mem_size; // @[util.scala:505:22]
reg uops_11_mem_signed; // @[util.scala:505:22]
reg uops_11_uses_ldq; // @[util.scala:505:22]
reg uops_11_uses_stq; // @[util.scala:505:22]
reg uops_11_is_unique; // @[util.scala:505:22]
reg uops_11_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_11_csr_cmd; // @[util.scala:505:22]
reg uops_11_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_11_ldst; // @[util.scala:505:22]
reg [5:0] uops_11_lrs1; // @[util.scala:505:22]
reg [5:0] uops_11_lrs2; // @[util.scala:505:22]
reg [5:0] uops_11_lrs3; // @[util.scala:505:22]
reg [1:0] uops_11_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_11_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_11_lrs2_rtype; // @[util.scala:505:22]
reg uops_11_frs3_en; // @[util.scala:505:22]
reg uops_11_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_11_fcn_op; // @[util.scala:505:22]
reg uops_11_fp_val; // @[util.scala:505:22]
reg [2:0] uops_11_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_11_fp_typ; // @[util.scala:505:22]
reg uops_11_xcpt_pf_if; // @[util.scala:505:22]
reg uops_11_xcpt_ae_if; // @[util.scala:505:22]
reg uops_11_xcpt_ma_if; // @[util.scala:505:22]
reg uops_11_bp_debug_if; // @[util.scala:505:22]
reg uops_11_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_11_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_11_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_12_inst; // @[util.scala:505:22]
reg [31:0] uops_12_debug_inst; // @[util.scala:505:22]
reg uops_12_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_12_debug_pc; // @[util.scala:505:22]
reg uops_12_iq_type_0; // @[util.scala:505:22]
reg uops_12_iq_type_1; // @[util.scala:505:22]
reg uops_12_iq_type_2; // @[util.scala:505:22]
reg uops_12_iq_type_3; // @[util.scala:505:22]
reg uops_12_fu_code_0; // @[util.scala:505:22]
reg uops_12_fu_code_1; // @[util.scala:505:22]
reg uops_12_fu_code_2; // @[util.scala:505:22]
reg uops_12_fu_code_3; // @[util.scala:505:22]
reg uops_12_fu_code_4; // @[util.scala:505:22]
reg uops_12_fu_code_5; // @[util.scala:505:22]
reg uops_12_fu_code_6; // @[util.scala:505:22]
reg uops_12_fu_code_7; // @[util.scala:505:22]
reg uops_12_fu_code_8; // @[util.scala:505:22]
reg uops_12_fu_code_9; // @[util.scala:505:22]
reg uops_12_iw_issued; // @[util.scala:505:22]
reg uops_12_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_12_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_12_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_12_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_12_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_12_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_12_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_12_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_12_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_12_br_mask_T_1 = uops_12_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_12_br_tag; // @[util.scala:505:22]
reg [3:0] uops_12_br_type; // @[util.scala:505:22]
reg uops_12_is_sfb; // @[util.scala:505:22]
reg uops_12_is_fence; // @[util.scala:505:22]
reg uops_12_is_fencei; // @[util.scala:505:22]
reg uops_12_is_sfence; // @[util.scala:505:22]
reg uops_12_is_amo; // @[util.scala:505:22]
reg uops_12_is_eret; // @[util.scala:505:22]
reg uops_12_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_12_is_rocc; // @[util.scala:505:22]
reg uops_12_is_mov; // @[util.scala:505:22]
reg [3:0] uops_12_ftq_idx; // @[util.scala:505:22]
reg uops_12_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_12_pc_lob; // @[util.scala:505:22]
reg uops_12_taken; // @[util.scala:505:22]
reg uops_12_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_12_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_12_pimm; // @[util.scala:505:22]
reg [19:0] uops_12_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_12_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_12_op2_sel; // @[util.scala:505:22]
reg uops_12_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_12_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_12_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_12_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_12_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_12_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_12_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_12_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_12_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_12_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_12_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_12_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_12_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_12_fp_ctrl_div; // @[util.scala:505:22]
reg uops_12_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_12_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_12_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_12_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_12_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_12_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_12_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_12_pdst; // @[util.scala:505:22]
reg [5:0] uops_12_prs1; // @[util.scala:505:22]
reg [5:0] uops_12_prs2; // @[util.scala:505:22]
reg [5:0] uops_12_prs3; // @[util.scala:505:22]
reg [3:0] uops_12_ppred; // @[util.scala:505:22]
reg uops_12_prs1_busy; // @[util.scala:505:22]
reg uops_12_prs2_busy; // @[util.scala:505:22]
reg uops_12_prs3_busy; // @[util.scala:505:22]
reg uops_12_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_12_stale_pdst; // @[util.scala:505:22]
reg uops_12_exception; // @[util.scala:505:22]
reg [63:0] uops_12_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_12_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_12_mem_size; // @[util.scala:505:22]
reg uops_12_mem_signed; // @[util.scala:505:22]
reg uops_12_uses_ldq; // @[util.scala:505:22]
reg uops_12_uses_stq; // @[util.scala:505:22]
reg uops_12_is_unique; // @[util.scala:505:22]
reg uops_12_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_12_csr_cmd; // @[util.scala:505:22]
reg uops_12_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_12_ldst; // @[util.scala:505:22]
reg [5:0] uops_12_lrs1; // @[util.scala:505:22]
reg [5:0] uops_12_lrs2; // @[util.scala:505:22]
reg [5:0] uops_12_lrs3; // @[util.scala:505:22]
reg [1:0] uops_12_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_12_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_12_lrs2_rtype; // @[util.scala:505:22]
reg uops_12_frs3_en; // @[util.scala:505:22]
reg uops_12_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_12_fcn_op; // @[util.scala:505:22]
reg uops_12_fp_val; // @[util.scala:505:22]
reg [2:0] uops_12_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_12_fp_typ; // @[util.scala:505:22]
reg uops_12_xcpt_pf_if; // @[util.scala:505:22]
reg uops_12_xcpt_ae_if; // @[util.scala:505:22]
reg uops_12_xcpt_ma_if; // @[util.scala:505:22]
reg uops_12_bp_debug_if; // @[util.scala:505:22]
reg uops_12_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_12_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_12_debug_tsrc; // @[util.scala:505:22]
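// Queue entry 13: stored micro-op fields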
reg [31:0] uops_13_inst; // @[util.scala:505:22]
reg [31:0] uops_13_debug_inst; // @[util.scala:505:22]
reg uops_13_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_13_debug_pc; // @[util.scala:505:22]
reg uops_13_iq_type_0; // @[util.scala:505:22]
reg uops_13_iq_type_1; // @[util.scala:505:22]
reg uops_13_iq_type_2; // @[util.scala:505:22]
reg uops_13_iq_type_3; // @[util.scala:505:22]
reg uops_13_fu_code_0; // @[util.scala:505:22]
reg uops_13_fu_code_1; // @[util.scala:505:22]
reg uops_13_fu_code_2; // @[util.scala:505:22]
reg uops_13_fu_code_3; // @[util.scala:505:22]
reg uops_13_fu_code_4; // @[util.scala:505:22]
reg uops_13_fu_code_5; // @[util.scala:505:22]
reg uops_13_fu_code_6; // @[util.scala:505:22]
reg uops_13_fu_code_7; // @[util.scala:505:22]
reg uops_13_fu_code_8; // @[util.scala:505:22]
reg uops_13_fu_code_9; // @[util.scala:505:22]
reg uops_13_iw_issued; // @[util.scala:505:22]
reg uops_13_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_13_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_13_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_13_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_13_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_13_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_13_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_13_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_13_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_13_br_mask_T_1 = uops_13_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_13_br_tag; // @[util.scala:505:22]
reg [3:0] uops_13_br_type; // @[util.scala:505:22]
reg uops_13_is_sfb; // @[util.scala:505:22]
reg uops_13_is_fence; // @[util.scala:505:22]
reg uops_13_is_fencei; // @[util.scala:505:22]
reg uops_13_is_sfence; // @[util.scala:505:22]
reg uops_13_is_amo; // @[util.scala:505:22]
reg uops_13_is_eret; // @[util.scala:505:22]
reg uops_13_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_13_is_rocc; // @[util.scala:505:22]
reg uops_13_is_mov; // @[util.scala:505:22]
reg [3:0] uops_13_ftq_idx; // @[util.scala:505:22]
reg uops_13_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_13_pc_lob; // @[util.scala:505:22]
reg uops_13_taken; // @[util.scala:505:22]
reg uops_13_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_13_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_13_pimm; // @[util.scala:505:22]
reg [19:0] uops_13_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_13_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_13_op2_sel; // @[util.scala:505:22]
reg uops_13_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_13_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_13_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_13_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_13_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_13_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_13_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_13_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_13_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_13_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_13_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_13_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_13_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_13_fp_ctrl_div; // @[util.scala:505:22]
reg uops_13_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_13_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_13_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_13_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_13_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_13_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_13_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_13_pdst; // @[util.scala:505:22]
reg [5:0] uops_13_prs1; // @[util.scala:505:22]
reg [5:0] uops_13_prs2; // @[util.scala:505:22]
reg [5:0] uops_13_prs3; // @[util.scala:505:22]
reg [3:0] uops_13_ppred; // @[util.scala:505:22]
reg uops_13_prs1_busy; // @[util.scala:505:22]
reg uops_13_prs2_busy; // @[util.scala:505:22]
reg uops_13_prs3_busy; // @[util.scala:505:22]
reg uops_13_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_13_stale_pdst; // @[util.scala:505:22]
reg uops_13_exception; // @[util.scala:505:22]
reg [63:0] uops_13_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_13_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_13_mem_size; // @[util.scala:505:22]
reg uops_13_mem_signed; // @[util.scala:505:22]
reg uops_13_uses_ldq; // @[util.scala:505:22]
reg uops_13_uses_stq; // @[util.scala:505:22]
reg uops_13_is_unique; // @[util.scala:505:22]
reg uops_13_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_13_csr_cmd; // @[util.scala:505:22]
reg uops_13_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_13_ldst; // @[util.scala:505:22]
reg [5:0] uops_13_lrs1; // @[util.scala:505:22]
reg [5:0] uops_13_lrs2; // @[util.scala:505:22]
reg [5:0] uops_13_lrs3; // @[util.scala:505:22]
reg [1:0] uops_13_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_13_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_13_lrs2_rtype; // @[util.scala:505:22]
reg uops_13_frs3_en; // @[util.scala:505:22]
reg uops_13_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_13_fcn_op; // @[util.scala:505:22]
reg uops_13_fp_val; // @[util.scala:505:22]
reg [2:0] uops_13_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_13_fp_typ; // @[util.scala:505:22]
reg uops_13_xcpt_pf_if; // @[util.scala:505:22]
reg uops_13_xcpt_ae_if; // @[util.scala:505:22]
reg uops_13_xcpt_ma_if; // @[util.scala:505:22]
reg uops_13_bp_debug_if; // @[util.scala:505:22]
reg uops_13_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_13_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_13_debug_tsrc; // @[util.scala:505:22]
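// Queue entry 14: stored micro-op fields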
reg [31:0] uops_14_inst; // @[util.scala:505:22]
reg [31:0] uops_14_debug_inst; // @[util.scala:505:22]
reg uops_14_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_14_debug_pc; // @[util.scala:505:22]
reg uops_14_iq_type_0; // @[util.scala:505:22]
reg uops_14_iq_type_1; // @[util.scala:505:22]
reg uops_14_iq_type_2; // @[util.scala:505:22]
reg uops_14_iq_type_3; // @[util.scala:505:22]
reg uops_14_fu_code_0; // @[util.scala:505:22]
reg uops_14_fu_code_1; // @[util.scala:505:22]
reg uops_14_fu_code_2; // @[util.scala:505:22]
reg uops_14_fu_code_3; // @[util.scala:505:22]
reg uops_14_fu_code_4; // @[util.scala:505:22]
reg uops_14_fu_code_5; // @[util.scala:505:22]
reg uops_14_fu_code_6; // @[util.scala:505:22]
reg uops_14_fu_code_7; // @[util.scala:505:22]
reg uops_14_fu_code_8; // @[util.scala:505:22]
reg uops_14_fu_code_9; // @[util.scala:505:22]
reg uops_14_iw_issued; // @[util.scala:505:22]
reg uops_14_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_14_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_14_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_14_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_14_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_14_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_14_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_14_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_14_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_14_br_mask_T_1 = uops_14_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_14_br_tag; // @[util.scala:505:22]
reg [3:0] uops_14_br_type; // @[util.scala:505:22]
reg uops_14_is_sfb; // @[util.scala:505:22]
reg uops_14_is_fence; // @[util.scala:505:22]
reg uops_14_is_fencei; // @[util.scala:505:22]
reg uops_14_is_sfence; // @[util.scala:505:22]
reg uops_14_is_amo; // @[util.scala:505:22]
reg uops_14_is_eret; // @[util.scala:505:22]
reg uops_14_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_14_is_rocc; // @[util.scala:505:22]
reg uops_14_is_mov; // @[util.scala:505:22]
reg [3:0] uops_14_ftq_idx; // @[util.scala:505:22]
reg uops_14_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_14_pc_lob; // @[util.scala:505:22]
reg uops_14_taken; // @[util.scala:505:22]
reg uops_14_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_14_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_14_pimm; // @[util.scala:505:22]
reg [19:0] uops_14_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_14_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_14_op2_sel; // @[util.scala:505:22]
reg uops_14_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_14_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_14_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_14_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_14_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_14_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_14_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_14_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_14_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_14_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_14_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_14_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_14_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_14_fp_ctrl_div; // @[util.scala:505:22]
reg uops_14_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_14_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_14_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_14_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_14_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_14_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_14_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_14_pdst; // @[util.scala:505:22]
reg [5:0] uops_14_prs1; // @[util.scala:505:22]
reg [5:0] uops_14_prs2; // @[util.scala:505:22]
reg [5:0] uops_14_prs3; // @[util.scala:505:22]
reg [3:0] uops_14_ppred; // @[util.scala:505:22]
reg uops_14_prs1_busy; // @[util.scala:505:22]
reg uops_14_prs2_busy; // @[util.scala:505:22]
reg uops_14_prs3_busy; // @[util.scala:505:22]
reg uops_14_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_14_stale_pdst; // @[util.scala:505:22]
reg uops_14_exception; // @[util.scala:505:22]
reg [63:0] uops_14_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_14_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_14_mem_size; // @[util.scala:505:22]
reg uops_14_mem_signed; // @[util.scala:505:22]
reg uops_14_uses_ldq; // @[util.scala:505:22]
reg uops_14_uses_stq; // @[util.scala:505:22]
reg uops_14_is_unique; // @[util.scala:505:22]
reg uops_14_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_14_csr_cmd; // @[util.scala:505:22]
reg uops_14_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_14_ldst; // @[util.scala:505:22]
reg [5:0] uops_14_lrs1; // @[util.scala:505:22]
reg [5:0] uops_14_lrs2; // @[util.scala:505:22]
reg [5:0] uops_14_lrs3; // @[util.scala:505:22]
reg [1:0] uops_14_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_14_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_14_lrs2_rtype; // @[util.scala:505:22]
reg uops_14_frs3_en; // @[util.scala:505:22]
reg uops_14_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_14_fcn_op; // @[util.scala:505:22]
reg uops_14_fp_val; // @[util.scala:505:22]
reg [2:0] uops_14_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_14_fp_typ; // @[util.scala:505:22]
reg uops_14_xcpt_pf_if; // @[util.scala:505:22]
reg uops_14_xcpt_ae_if; // @[util.scala:505:22]
reg uops_14_xcpt_ma_if; // @[util.scala:505:22]
reg uops_14_bp_debug_if; // @[util.scala:505:22]
reg uops_14_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_14_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_14_debug_tsrc; // @[util.scala:505:22]
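// Enqueue/dequeue pointers and occupancy tracking for the 15-entry circular queue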
reg [3:0] enq_ptr_value; // @[Counter.scala:61:40]
reg [3:0] deq_ptr_value; // @[Counter.scala:61:40]
reg maybe_full; // @[util.scala:509:29]
wire ptr_match = enq_ptr_value == deq_ptr_value; // @[Counter.scala:61:40]
wire _io_empty_T = ~maybe_full; // @[util.scala:509:29, :512:30]
assign _io_empty_T_1 = ptr_match & _io_empty_T; // @[util.scala:511:35, :512:{27,30}]
assign io_empty_0 = _io_empty_T_1; // @[util.scala:458:7, :512:27]
wire full = ptr_match & maybe_full; // @[util.scala:509:29, :511:35, :513:26]
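// Enqueue when the producer handshake fires; dequeue when the consumer is ready
// (or the head entry is invalid) and the queue is non-empty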
wire _do_enq_T = io_enq_ready_0 & io_enq_valid_0; // @[Decoupled.scala:51:35]
wire _do_enq_T_5 = _do_enq_T; // @[Decoupled.scala:51:35]
wire _do_enq_T_8 = _do_enq_T_5; // @[util.scala:514:{39,99}]
wire do_enq = _do_enq_T_8; // @[util.scala:514:{26,99}]
wire [15:0] _GEN = {{valids_0}, {valids_14}, {valids_13}, {valids_12}, {valids_11}, {valids_10}, {valids_9}, {valids_8}, {valids_7}, {valids_6}, {valids_5}, {valids_4}, {valids_3}, {valids_2}, {valids_1}, {valids_0}}; // @[util.scala:504:26, :515:44]
wire _GEN_0 = _GEN[deq_ptr_value]; // @[Counter.scala:61:40]
wire _do_deq_T = ~_GEN_0; // @[util.scala:515:44]
wire _do_deq_T_1 = io_deq_ready_0 | _do_deq_T; // @[util.scala:458:7, :515:{41,44}]
wire _do_deq_T_2 = ~io_empty_0; // @[util.scala:458:7, :515:71]
wire _do_deq_T_3 = _do_deq_T_1 & _do_deq_T_2; // @[util.scala:515:{41,68,71}]
wire do_deq = _do_deq_T_3; // @[util.scala:515:{26,68}]
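// Intermediate terms for the per-entry valid-bit updates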
wire _valids_0_T_7 = _valids_0_T_4; // @[util.scala:520:{31,80}]
wire _valids_1_T_7 = _valids_1_T_4; // @[util.scala:520:{31,80}]
wire _valids_2_T_7 = _valids_2_T_4; // @[util.scala:520:{31,80}]
wire _valids_3_T_7 = _valids_3_T_4; // @[util.scala:520:{31,80}]
wire _valids_4_T_7 = _valids_4_T_4; // @[util.scala:520:{31,80}]
wire _valids_5_T_7 = _valids_5_T_4; // @[util.scala:520:{31,80}]
wire _valids_6_T_7 = _valids_6_T_4; // @[util.scala:520:{31,80}]
wire _valids_7_T_7 = _valids_7_T_4; // @[util.scala:520:{31,80}]
wire _valids_8_T_7 = _valids_8_T_4; // @[util.scala:520:{31,80}]
wire _valids_9_T_7 = _valids_9_T_4; // @[util.scala:520:{31,80}]
wire _valids_10_T_7 = _valids_10_T_4; // @[util.scala:520:{31,80}]
wire _valids_11_T_7 = _valids_11_T_4; // @[util.scala:520:{31,80}]
wire _valids_12_T_7 = _valids_12_T_4; // @[util.scala:520:{31,80}]
wire _valids_13_T_7 = _valids_13_T_4; // @[util.scala:520:{31,80}]
wire _valids_14_T_7 = _valids_14_T_4; // @[util.scala:520:{31,80}]
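// Pointer increment logic: both counters wrap after entry 14 (15 entries total)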
wire wrap = enq_ptr_value == 4'hE; // @[Counter.scala:61:40, :73:24]
wire [4:0] _GEN_1 = {1'h0, enq_ptr_value}; // @[Counter.scala:61:40, :77:24]
wire [4:0] _value_T = _GEN_1 + 5'h1; // @[Counter.scala:77:24]
wire [3:0] _value_T_1 = _value_T[3:0]; // @[Counter.scala:77:24]
wire wrap_1 = deq_ptr_value == 4'hE; // @[Counter.scala:61:40, :73:24]
wire [4:0] _GEN_2 = {1'h0, deq_ptr_value}; // @[Counter.scala:61:40, :77:24]
wire [4:0] _value_T_2 = _GEN_2 + 5'h1; // @[Counter.scala:77:24]
wire [3:0] _value_T_3 = _value_T_2[3:0]; // @[Counter.scala:77:24]
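// Output side: enqueue is ready when the queue is not full; the dequeue payload is
// driven from the entry selected by deq_ptr_value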
assign _io_enq_ready_T = ~full; // @[util.scala:513:26, :543:21]
assign io_enq_ready_0 = _io_enq_ready_T; // @[util.scala:458:7, :543:21]
assign io_deq_bits_uop_inst_0 = out_uop_inst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_debug_inst_0 = out_uop_debug_inst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_rvc_0 = out_uop_is_rvc; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_debug_pc_0 = out_uop_debug_pc; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iq_type_0_0 = out_uop_iq_type_0; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iq_type_1_0 = out_uop_iq_type_1; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iq_type_2_0 = out_uop_iq_type_2; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iq_type_3_0 = out_uop_iq_type_3; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_0_0 = out_uop_fu_code_0; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_1_0 = out_uop_fu_code_1; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_2_0 = out_uop_fu_code_2; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_3_0 = out_uop_fu_code_3; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_4_0 = out_uop_fu_code_4; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_5_0 = out_uop_fu_code_5; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_6_0 = out_uop_fu_code_6; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_7_0 = out_uop_fu_code_7; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_8_0 = out_uop_fu_code_8; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_9_0 = out_uop_fu_code_9; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_issued_0 = out_uop_iw_issued; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_issued_partial_agen_0 = out_uop_iw_issued_partial_agen; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_issued_partial_dgen_0 = out_uop_iw_issued_partial_dgen; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_p1_speculative_child_0 = out_uop_iw_p1_speculative_child; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_p2_speculative_child_0 = out_uop_iw_p2_speculative_child; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_p1_bypass_hint_0 = out_uop_iw_p1_bypass_hint; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_p2_bypass_hint_0 = out_uop_iw_p2_bypass_hint; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_p3_bypass_hint_0 = out_uop_iw_p3_bypass_hint; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_dis_col_sel_0 = out_uop_dis_col_sel; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_br_mask_0 = out_uop_br_mask; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_br_tag_0 = out_uop_br_tag; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_br_type_0 = out_uop_br_type; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_sfb_0 = out_uop_is_sfb; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_fence_0 = out_uop_is_fence; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_fencei_0 = out_uop_is_fencei; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_sfence_0 = out_uop_is_sfence; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_amo_0 = out_uop_is_amo; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_eret_0 = out_uop_is_eret; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_sys_pc2epc_0 = out_uop_is_sys_pc2epc; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_rocc_0 = out_uop_is_rocc; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_mov_0 = out_uop_is_mov; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_ftq_idx_0 = out_uop_ftq_idx; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_edge_inst_0 = out_uop_edge_inst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_pc_lob_0 = out_uop_pc_lob; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_taken_0 = out_uop_taken; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_imm_rename_0 = out_uop_imm_rename; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_imm_sel_0 = out_uop_imm_sel; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_pimm_0 = out_uop_pimm; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_imm_packed_0 = out_uop_imm_packed; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_op1_sel_0 = out_uop_op1_sel; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_op2_sel_0 = out_uop_op2_sel; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_ldst_0 = out_uop_fp_ctrl_ldst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_wen_0 = out_uop_fp_ctrl_wen; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_ren1_0 = out_uop_fp_ctrl_ren1; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_ren2_0 = out_uop_fp_ctrl_ren2; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_ren3_0 = out_uop_fp_ctrl_ren3; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_swap12_0 = out_uop_fp_ctrl_swap12; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_swap23_0 = out_uop_fp_ctrl_swap23; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_typeTagIn_0 = out_uop_fp_ctrl_typeTagIn; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_typeTagOut_0 = out_uop_fp_ctrl_typeTagOut; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_fromint_0 = out_uop_fp_ctrl_fromint; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_toint_0 = out_uop_fp_ctrl_toint; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_fastpipe_0 = out_uop_fp_ctrl_fastpipe; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_fma_0 = out_uop_fp_ctrl_fma; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_div_0 = out_uop_fp_ctrl_div; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_sqrt_0 = out_uop_fp_ctrl_sqrt; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_wflags_0 = out_uop_fp_ctrl_wflags; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_vec_0 = out_uop_fp_ctrl_vec; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_rob_idx_0 = out_uop_rob_idx; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_ldq_idx_0 = out_uop_ldq_idx; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_stq_idx_0 = out_uop_stq_idx; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_rxq_idx_0 = out_uop_rxq_idx; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_pdst_0 = out_uop_pdst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_prs1_0 = out_uop_prs1; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_prs2_0 = out_uop_prs2; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_prs3_0 = out_uop_prs3; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_ppred_0 = out_uop_ppred; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_prs1_busy_0 = out_uop_prs1_busy; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_prs2_busy_0 = out_uop_prs2_busy; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_prs3_busy_0 = out_uop_prs3_busy; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_ppred_busy_0 = out_uop_ppred_busy; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_stale_pdst_0 = out_uop_stale_pdst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_exception_0 = out_uop_exception; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_exc_cause_0 = out_uop_exc_cause; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_mem_cmd_0 = out_uop_mem_cmd; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_mem_size_0 = out_uop_mem_size; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_mem_signed_0 = out_uop_mem_signed; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_uses_ldq_0 = out_uop_uses_ldq; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_uses_stq_0 = out_uop_uses_stq; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_unique_0 = out_uop_is_unique; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_flush_on_commit_0 = out_uop_flush_on_commit; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_csr_cmd_0 = out_uop_csr_cmd; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_ldst_is_rs1_0 = out_uop_ldst_is_rs1; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_ldst_0 = out_uop_ldst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_lrs1_0 = out_uop_lrs1; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_lrs2_0 = out_uop_lrs2; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_lrs3_0 = out_uop_lrs3; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_dst_rtype_0 = out_uop_dst_rtype; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_lrs1_rtype_0 = out_uop_lrs1_rtype; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_lrs2_rtype_0 = out_uop_lrs2_rtype; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_frs3_en_0 = out_uop_frs3_en; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fcn_dw_0 = out_uop_fcn_dw; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fcn_op_0 = out_uop_fcn_op; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_val_0 = out_uop_fp_val; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_rm_0 = out_uop_fp_rm; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_typ_0 = out_uop_fp_typ; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_xcpt_pf_if_0 = out_uop_xcpt_pf_if; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_xcpt_ae_if_0 = out_uop_xcpt_ae_if; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_xcpt_ma_if_0 = out_uop_xcpt_ma_if; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_bp_debug_if_0 = out_uop_bp_debug_if; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_bp_xcpt_if_0 = out_uop_bp_xcpt_if; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_debug_fsrc_0 = out_uop_debug_fsrc; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_debug_tsrc_0 = out_uop_debug_tsrc; // @[util.scala:458:7, :545:19]
assign io_deq_bits_addr_0 = out_addr; // @[util.scala:458:7, :545:19]
assign io_deq_bits_data_0 = out_data; // @[util.scala:458:7, :545:19]
assign io_deq_bits_is_hella_0 = out_is_hella; // @[util.scala:458:7, :545:19]
assign io_deq_bits_tag_match_0 = out_tag_match; // @[util.scala:458:7, :545:19]
assign io_deq_bits_old_meta_coh_state_0 = out_old_meta_coh_state; // @[util.scala:458:7, :545:19]
assign io_deq_bits_old_meta_tag_0 = out_old_meta_tag; // @[util.scala:458:7, :545:19]
assign io_deq_bits_way_en_0 = out_way_en; // @[util.scala:458:7, :545:19]
assign io_deq_bits_sdq_id_0 = out_sdq_id; // @[util.scala:458:7, :545:19]
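// Per-field read muxes for the dequeue entry: each _GEN vector places entry 0's value
// at index 15 so the 4-bit pointer always selects a defined slot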
wire [15:0][31:0] _GEN_3 = {{uops_0_inst}, {uops_14_inst}, {uops_13_inst}, {uops_12_inst}, {uops_11_inst}, {uops_10_inst}, {uops_9_inst}, {uops_8_inst}, {uops_7_inst}, {uops_6_inst}, {uops_5_inst}, {uops_4_inst}, {uops_3_inst}, {uops_2_inst}, {uops_1_inst}, {uops_0_inst}}; // @[util.scala:505:22, :547:21]
assign out_uop_inst = _GEN_3[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][31:0] _GEN_4 = {{uops_0_debug_inst}, {uops_14_debug_inst}, {uops_13_debug_inst}, {uops_12_debug_inst}, {uops_11_debug_inst}, {uops_10_debug_inst}, {uops_9_debug_inst}, {uops_8_debug_inst}, {uops_7_debug_inst}, {uops_6_debug_inst}, {uops_5_debug_inst}, {uops_4_debug_inst}, {uops_3_debug_inst}, {uops_2_debug_inst}, {uops_1_debug_inst}, {uops_0_debug_inst}}; // @[util.scala:505:22, :547:21]
assign out_uop_debug_inst = _GEN_4[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_5 = {{uops_0_is_rvc}, {uops_14_is_rvc}, {uops_13_is_rvc}, {uops_12_is_rvc}, {uops_11_is_rvc}, {uops_10_is_rvc}, {uops_9_is_rvc}, {uops_8_is_rvc}, {uops_7_is_rvc}, {uops_6_is_rvc}, {uops_5_is_rvc}, {uops_4_is_rvc}, {uops_3_is_rvc}, {uops_2_is_rvc}, {uops_1_is_rvc}, {uops_0_is_rvc}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_rvc = _GEN_5[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][33:0] _GEN_6 = {{uops_0_debug_pc}, {uops_14_debug_pc}, {uops_13_debug_pc}, {uops_12_debug_pc}, {uops_11_debug_pc}, {uops_10_debug_pc}, {uops_9_debug_pc}, {uops_8_debug_pc}, {uops_7_debug_pc}, {uops_6_debug_pc}, {uops_5_debug_pc}, {uops_4_debug_pc}, {uops_3_debug_pc}, {uops_2_debug_pc}, {uops_1_debug_pc}, {uops_0_debug_pc}}; // @[util.scala:505:22, :547:21]
assign out_uop_debug_pc = _GEN_6[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_7 = {{uops_0_iq_type_0}, {uops_14_iq_type_0}, {uops_13_iq_type_0}, {uops_12_iq_type_0}, {uops_11_iq_type_0}, {uops_10_iq_type_0}, {uops_9_iq_type_0}, {uops_8_iq_type_0}, {uops_7_iq_type_0}, {uops_6_iq_type_0}, {uops_5_iq_type_0}, {uops_4_iq_type_0}, {uops_3_iq_type_0}, {uops_2_iq_type_0}, {uops_1_iq_type_0}, {uops_0_iq_type_0}}; // @[util.scala:505:22, :547:21]
assign out_uop_iq_type_0 = _GEN_7[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_8 = {{uops_0_iq_type_1}, {uops_14_iq_type_1}, {uops_13_iq_type_1}, {uops_12_iq_type_1}, {uops_11_iq_type_1}, {uops_10_iq_type_1}, {uops_9_iq_type_1}, {uops_8_iq_type_1}, {uops_7_iq_type_1}, {uops_6_iq_type_1}, {uops_5_iq_type_1}, {uops_4_iq_type_1}, {uops_3_iq_type_1}, {uops_2_iq_type_1}, {uops_1_iq_type_1}, {uops_0_iq_type_1}}; // @[util.scala:505:22, :547:21]
assign out_uop_iq_type_1 = _GEN_8[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_9 = {{uops_0_iq_type_2}, {uops_14_iq_type_2}, {uops_13_iq_type_2}, {uops_12_iq_type_2}, {uops_11_iq_type_2}, {uops_10_iq_type_2}, {uops_9_iq_type_2}, {uops_8_iq_type_2}, {uops_7_iq_type_2}, {uops_6_iq_type_2}, {uops_5_iq_type_2}, {uops_4_iq_type_2}, {uops_3_iq_type_2}, {uops_2_iq_type_2}, {uops_1_iq_type_2}, {uops_0_iq_type_2}}; // @[util.scala:505:22, :547:21]
assign out_uop_iq_type_2 = _GEN_9[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_10 = {{uops_0_iq_type_3}, {uops_14_iq_type_3}, {uops_13_iq_type_3}, {uops_12_iq_type_3}, {uops_11_iq_type_3}, {uops_10_iq_type_3}, {uops_9_iq_type_3}, {uops_8_iq_type_3}, {uops_7_iq_type_3}, {uops_6_iq_type_3}, {uops_5_iq_type_3}, {uops_4_iq_type_3}, {uops_3_iq_type_3}, {uops_2_iq_type_3}, {uops_1_iq_type_3}, {uops_0_iq_type_3}}; // @[util.scala:505:22, :547:21]
assign out_uop_iq_type_3 = _GEN_10[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_11 = {{uops_0_fu_code_0}, {uops_14_fu_code_0}, {uops_13_fu_code_0}, {uops_12_fu_code_0}, {uops_11_fu_code_0}, {uops_10_fu_code_0}, {uops_9_fu_code_0}, {uops_8_fu_code_0}, {uops_7_fu_code_0}, {uops_6_fu_code_0}, {uops_5_fu_code_0}, {uops_4_fu_code_0}, {uops_3_fu_code_0}, {uops_2_fu_code_0}, {uops_1_fu_code_0}, {uops_0_fu_code_0}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_0 = _GEN_11[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_12 = {{uops_0_fu_code_1}, {uops_14_fu_code_1}, {uops_13_fu_code_1}, {uops_12_fu_code_1}, {uops_11_fu_code_1}, {uops_10_fu_code_1}, {uops_9_fu_code_1}, {uops_8_fu_code_1}, {uops_7_fu_code_1}, {uops_6_fu_code_1}, {uops_5_fu_code_1}, {uops_4_fu_code_1}, {uops_3_fu_code_1}, {uops_2_fu_code_1}, {uops_1_fu_code_1}, {uops_0_fu_code_1}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_1 = _GEN_12[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_13 = {{uops_0_fu_code_2}, {uops_14_fu_code_2}, {uops_13_fu_code_2}, {uops_12_fu_code_2}, {uops_11_fu_code_2}, {uops_10_fu_code_2}, {uops_9_fu_code_2}, {uops_8_fu_code_2}, {uops_7_fu_code_2}, {uops_6_fu_code_2}, {uops_5_fu_code_2}, {uops_4_fu_code_2}, {uops_3_fu_code_2}, {uops_2_fu_code_2}, {uops_1_fu_code_2}, {uops_0_fu_code_2}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_2 = _GEN_13[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_14 = {{uops_0_fu_code_3}, {uops_14_fu_code_3}, {uops_13_fu_code_3}, {uops_12_fu_code_3}, {uops_11_fu_code_3}, {uops_10_fu_code_3}, {uops_9_fu_code_3}, {uops_8_fu_code_3}, {uops_7_fu_code_3}, {uops_6_fu_code_3}, {uops_5_fu_code_3}, {uops_4_fu_code_3}, {uops_3_fu_code_3}, {uops_2_fu_code_3}, {uops_1_fu_code_3}, {uops_0_fu_code_3}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_3 = _GEN_14[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_15 = {{uops_0_fu_code_4}, {uops_14_fu_code_4}, {uops_13_fu_code_4}, {uops_12_fu_code_4}, {uops_11_fu_code_4}, {uops_10_fu_code_4}, {uops_9_fu_code_4}, {uops_8_fu_code_4}, {uops_7_fu_code_4}, {uops_6_fu_code_4}, {uops_5_fu_code_4}, {uops_4_fu_code_4}, {uops_3_fu_code_4}, {uops_2_fu_code_4}, {uops_1_fu_code_4}, {uops_0_fu_code_4}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_4 = _GEN_15[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_16 = {{uops_0_fu_code_5}, {uops_14_fu_code_5}, {uops_13_fu_code_5}, {uops_12_fu_code_5}, {uops_11_fu_code_5}, {uops_10_fu_code_5}, {uops_9_fu_code_5}, {uops_8_fu_code_5}, {uops_7_fu_code_5}, {uops_6_fu_code_5}, {uops_5_fu_code_5}, {uops_4_fu_code_5}, {uops_3_fu_code_5}, {uops_2_fu_code_5}, {uops_1_fu_code_5}, {uops_0_fu_code_5}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_5 = _GEN_16[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_17 = {{uops_0_fu_code_6}, {uops_14_fu_code_6}, {uops_13_fu_code_6}, {uops_12_fu_code_6}, {uops_11_fu_code_6}, {uops_10_fu_code_6}, {uops_9_fu_code_6}, {uops_8_fu_code_6}, {uops_7_fu_code_6}, {uops_6_fu_code_6}, {uops_5_fu_code_6}, {uops_4_fu_code_6}, {uops_3_fu_code_6}, {uops_2_fu_code_6}, {uops_1_fu_code_6}, {uops_0_fu_code_6}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_6 = _GEN_17[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_18 = {{uops_0_fu_code_7}, {uops_14_fu_code_7}, {uops_13_fu_code_7}, {uops_12_fu_code_7}, {uops_11_fu_code_7}, {uops_10_fu_code_7}, {uops_9_fu_code_7}, {uops_8_fu_code_7}, {uops_7_fu_code_7}, {uops_6_fu_code_7}, {uops_5_fu_code_7}, {uops_4_fu_code_7}, {uops_3_fu_code_7}, {uops_2_fu_code_7}, {uops_1_fu_code_7}, {uops_0_fu_code_7}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_7 = _GEN_18[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_19 = {{uops_0_fu_code_8}, {uops_14_fu_code_8}, {uops_13_fu_code_8}, {uops_12_fu_code_8}, {uops_11_fu_code_8}, {uops_10_fu_code_8}, {uops_9_fu_code_8}, {uops_8_fu_code_8}, {uops_7_fu_code_8}, {uops_6_fu_code_8}, {uops_5_fu_code_8}, {uops_4_fu_code_8}, {uops_3_fu_code_8}, {uops_2_fu_code_8}, {uops_1_fu_code_8}, {uops_0_fu_code_8}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_8 = _GEN_19[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_20 = {{uops_0_fu_code_9}, {uops_14_fu_code_9}, {uops_13_fu_code_9}, {uops_12_fu_code_9}, {uops_11_fu_code_9}, {uops_10_fu_code_9}, {uops_9_fu_code_9}, {uops_8_fu_code_9}, {uops_7_fu_code_9}, {uops_6_fu_code_9}, {uops_5_fu_code_9}, {uops_4_fu_code_9}, {uops_3_fu_code_9}, {uops_2_fu_code_9}, {uops_1_fu_code_9}, {uops_0_fu_code_9}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_9 = _GEN_20[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_21 = {{uops_0_iw_issued}, {uops_14_iw_issued}, {uops_13_iw_issued}, {uops_12_iw_issued}, {uops_11_iw_issued}, {uops_10_iw_issued}, {uops_9_iw_issued}, {uops_8_iw_issued}, {uops_7_iw_issued}, {uops_6_iw_issued}, {uops_5_iw_issued}, {uops_4_iw_issued}, {uops_3_iw_issued}, {uops_2_iw_issued}, {uops_1_iw_issued}, {uops_0_iw_issued}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_issued = _GEN_21[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_22 = {{uops_0_iw_issued_partial_agen}, {uops_14_iw_issued_partial_agen}, {uops_13_iw_issued_partial_agen}, {uops_12_iw_issued_partial_agen}, {uops_11_iw_issued_partial_agen}, {uops_10_iw_issued_partial_agen}, {uops_9_iw_issued_partial_agen}, {uops_8_iw_issued_partial_agen}, {uops_7_iw_issued_partial_agen}, {uops_6_iw_issued_partial_agen}, {uops_5_iw_issued_partial_agen}, {uops_4_iw_issued_partial_agen}, {uops_3_iw_issued_partial_agen}, {uops_2_iw_issued_partial_agen}, {uops_1_iw_issued_partial_agen}, {uops_0_iw_issued_partial_agen}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_issued_partial_agen = _GEN_22[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_23 = {{uops_0_iw_issued_partial_dgen}, {uops_14_iw_issued_partial_dgen}, {uops_13_iw_issued_partial_dgen}, {uops_12_iw_issued_partial_dgen}, {uops_11_iw_issued_partial_dgen}, {uops_10_iw_issued_partial_dgen}, {uops_9_iw_issued_partial_dgen}, {uops_8_iw_issued_partial_dgen}, {uops_7_iw_issued_partial_dgen}, {uops_6_iw_issued_partial_dgen}, {uops_5_iw_issued_partial_dgen}, {uops_4_iw_issued_partial_dgen}, {uops_3_iw_issued_partial_dgen}, {uops_2_iw_issued_partial_dgen}, {uops_1_iw_issued_partial_dgen}, {uops_0_iw_issued_partial_dgen}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_issued_partial_dgen = _GEN_23[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_24 = {{uops_0_iw_p1_speculative_child}, {uops_14_iw_p1_speculative_child}, {uops_13_iw_p1_speculative_child}, {uops_12_iw_p1_speculative_child}, {uops_11_iw_p1_speculative_child}, {uops_10_iw_p1_speculative_child}, {uops_9_iw_p1_speculative_child}, {uops_8_iw_p1_speculative_child}, {uops_7_iw_p1_speculative_child}, {uops_6_iw_p1_speculative_child}, {uops_5_iw_p1_speculative_child}, {uops_4_iw_p1_speculative_child}, {uops_3_iw_p1_speculative_child}, {uops_2_iw_p1_speculative_child}, {uops_1_iw_p1_speculative_child}, {uops_0_iw_p1_speculative_child}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_p1_speculative_child = _GEN_24[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_25 = {{uops_0_iw_p2_speculative_child}, {uops_14_iw_p2_speculative_child}, {uops_13_iw_p2_speculative_child}, {uops_12_iw_p2_speculative_child}, {uops_11_iw_p2_speculative_child}, {uops_10_iw_p2_speculative_child}, {uops_9_iw_p2_speculative_child}, {uops_8_iw_p2_speculative_child}, {uops_7_iw_p2_speculative_child}, {uops_6_iw_p2_speculative_child}, {uops_5_iw_p2_speculative_child}, {uops_4_iw_p2_speculative_child}, {uops_3_iw_p2_speculative_child}, {uops_2_iw_p2_speculative_child}, {uops_1_iw_p2_speculative_child}, {uops_0_iw_p2_speculative_child}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_p2_speculative_child = _GEN_25[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_26 = {{uops_0_iw_p1_bypass_hint}, {uops_14_iw_p1_bypass_hint}, {uops_13_iw_p1_bypass_hint}, {uops_12_iw_p1_bypass_hint}, {uops_11_iw_p1_bypass_hint}, {uops_10_iw_p1_bypass_hint}, {uops_9_iw_p1_bypass_hint}, {uops_8_iw_p1_bypass_hint}, {uops_7_iw_p1_bypass_hint}, {uops_6_iw_p1_bypass_hint}, {uops_5_iw_p1_bypass_hint}, {uops_4_iw_p1_bypass_hint}, {uops_3_iw_p1_bypass_hint}, {uops_2_iw_p1_bypass_hint}, {uops_1_iw_p1_bypass_hint}, {uops_0_iw_p1_bypass_hint}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_p1_bypass_hint = _GEN_26[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_27 = {{uops_0_iw_p2_bypass_hint}, {uops_14_iw_p2_bypass_hint}, {uops_13_iw_p2_bypass_hint}, {uops_12_iw_p2_bypass_hint}, {uops_11_iw_p2_bypass_hint}, {uops_10_iw_p2_bypass_hint}, {uops_9_iw_p2_bypass_hint}, {uops_8_iw_p2_bypass_hint}, {uops_7_iw_p2_bypass_hint}, {uops_6_iw_p2_bypass_hint}, {uops_5_iw_p2_bypass_hint}, {uops_4_iw_p2_bypass_hint}, {uops_3_iw_p2_bypass_hint}, {uops_2_iw_p2_bypass_hint}, {uops_1_iw_p2_bypass_hint}, {uops_0_iw_p2_bypass_hint}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_p2_bypass_hint = _GEN_27[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_28 = {{uops_0_iw_p3_bypass_hint}, {uops_14_iw_p3_bypass_hint}, {uops_13_iw_p3_bypass_hint}, {uops_12_iw_p3_bypass_hint}, {uops_11_iw_p3_bypass_hint}, {uops_10_iw_p3_bypass_hint}, {uops_9_iw_p3_bypass_hint}, {uops_8_iw_p3_bypass_hint}, {uops_7_iw_p3_bypass_hint}, {uops_6_iw_p3_bypass_hint}, {uops_5_iw_p3_bypass_hint}, {uops_4_iw_p3_bypass_hint}, {uops_3_iw_p3_bypass_hint}, {uops_2_iw_p3_bypass_hint}, {uops_1_iw_p3_bypass_hint}, {uops_0_iw_p3_bypass_hint}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_p3_bypass_hint = _GEN_28[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_29 = {{uops_0_dis_col_sel}, {uops_14_dis_col_sel}, {uops_13_dis_col_sel}, {uops_12_dis_col_sel}, {uops_11_dis_col_sel}, {uops_10_dis_col_sel}, {uops_9_dis_col_sel}, {uops_8_dis_col_sel}, {uops_7_dis_col_sel}, {uops_6_dis_col_sel}, {uops_5_dis_col_sel}, {uops_4_dis_col_sel}, {uops_3_dis_col_sel}, {uops_2_dis_col_sel}, {uops_1_dis_col_sel}, {uops_0_dis_col_sel}}; // @[util.scala:505:22, :547:21]
assign out_uop_dis_col_sel = _GEN_29[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][3:0] _GEN_30 = {{uops_0_br_mask}, {uops_14_br_mask}, {uops_13_br_mask}, {uops_12_br_mask}, {uops_11_br_mask}, {uops_10_br_mask}, {uops_9_br_mask}, {uops_8_br_mask}, {uops_7_br_mask}, {uops_6_br_mask}, {uops_5_br_mask}, {uops_4_br_mask}, {uops_3_br_mask}, {uops_2_br_mask}, {uops_1_br_mask}, {uops_0_br_mask}}; // @[util.scala:505:22, :547:21]
assign out_uop_br_mask = _GEN_30[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_31 = {{uops_0_br_tag}, {uops_14_br_tag}, {uops_13_br_tag}, {uops_12_br_tag}, {uops_11_br_tag}, {uops_10_br_tag}, {uops_9_br_tag}, {uops_8_br_tag}, {uops_7_br_tag}, {uops_6_br_tag}, {uops_5_br_tag}, {uops_4_br_tag}, {uops_3_br_tag}, {uops_2_br_tag}, {uops_1_br_tag}, {uops_0_br_tag}}; // @[util.scala:505:22, :547:21]
assign out_uop_br_tag = _GEN_31[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][3:0] _GEN_32 = {{uops_0_br_type}, {uops_14_br_type}, {uops_13_br_type}, {uops_12_br_type}, {uops_11_br_type}, {uops_10_br_type}, {uops_9_br_type}, {uops_8_br_type}, {uops_7_br_type}, {uops_6_br_type}, {uops_5_br_type}, {uops_4_br_type}, {uops_3_br_type}, {uops_2_br_type}, {uops_1_br_type}, {uops_0_br_type}}; // @[util.scala:505:22, :547:21]
assign out_uop_br_type = _GEN_32[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_33 = {{uops_0_is_sfb}, {uops_14_is_sfb}, {uops_13_is_sfb}, {uops_12_is_sfb}, {uops_11_is_sfb}, {uops_10_is_sfb}, {uops_9_is_sfb}, {uops_8_is_sfb}, {uops_7_is_sfb}, {uops_6_is_sfb}, {uops_5_is_sfb}, {uops_4_is_sfb}, {uops_3_is_sfb}, {uops_2_is_sfb}, {uops_1_is_sfb}, {uops_0_is_sfb}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_sfb = _GEN_33[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_34 = {{uops_0_is_fence}, {uops_14_is_fence}, {uops_13_is_fence}, {uops_12_is_fence}, {uops_11_is_fence}, {uops_10_is_fence}, {uops_9_is_fence}, {uops_8_is_fence}, {uops_7_is_fence}, {uops_6_is_fence}, {uops_5_is_fence}, {uops_4_is_fence}, {uops_3_is_fence}, {uops_2_is_fence}, {uops_1_is_fence}, {uops_0_is_fence}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_fence = _GEN_34[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_35 = {{uops_0_is_fencei}, {uops_14_is_fencei}, {uops_13_is_fencei}, {uops_12_is_fencei}, {uops_11_is_fencei}, {uops_10_is_fencei}, {uops_9_is_fencei}, {uops_8_is_fencei}, {uops_7_is_fencei}, {uops_6_is_fencei}, {uops_5_is_fencei}, {uops_4_is_fencei}, {uops_3_is_fencei}, {uops_2_is_fencei}, {uops_1_is_fencei}, {uops_0_is_fencei}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_fencei = _GEN_35[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_36 = {{uops_0_is_sfence}, {uops_14_is_sfence}, {uops_13_is_sfence}, {uops_12_is_sfence}, {uops_11_is_sfence}, {uops_10_is_sfence}, {uops_9_is_sfence}, {uops_8_is_sfence}, {uops_7_is_sfence}, {uops_6_is_sfence}, {uops_5_is_sfence}, {uops_4_is_sfence}, {uops_3_is_sfence}, {uops_2_is_sfence}, {uops_1_is_sfence}, {uops_0_is_sfence}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_sfence = _GEN_36[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_37 = {{uops_0_is_amo}, {uops_14_is_amo}, {uops_13_is_amo}, {uops_12_is_amo}, {uops_11_is_amo}, {uops_10_is_amo}, {uops_9_is_amo}, {uops_8_is_amo}, {uops_7_is_amo}, {uops_6_is_amo}, {uops_5_is_amo}, {uops_4_is_amo}, {uops_3_is_amo}, {uops_2_is_amo}, {uops_1_is_amo}, {uops_0_is_amo}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_amo = _GEN_37[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_38 = {{uops_0_is_eret}, {uops_14_is_eret}, {uops_13_is_eret}, {uops_12_is_eret}, {uops_11_is_eret}, {uops_10_is_eret}, {uops_9_is_eret}, {uops_8_is_eret}, {uops_7_is_eret}, {uops_6_is_eret}, {uops_5_is_eret}, {uops_4_is_eret}, {uops_3_is_eret}, {uops_2_is_eret}, {uops_1_is_eret}, {uops_0_is_eret}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_eret = _GEN_38[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_39 = {{uops_0_is_sys_pc2epc}, {uops_14_is_sys_pc2epc}, {uops_13_is_sys_pc2epc}, {uops_12_is_sys_pc2epc}, {uops_11_is_sys_pc2epc}, {uops_10_is_sys_pc2epc}, {uops_9_is_sys_pc2epc}, {uops_8_is_sys_pc2epc}, {uops_7_is_sys_pc2epc}, {uops_6_is_sys_pc2epc}, {uops_5_is_sys_pc2epc}, {uops_4_is_sys_pc2epc}, {uops_3_is_sys_pc2epc}, {uops_2_is_sys_pc2epc}, {uops_1_is_sys_pc2epc}, {uops_0_is_sys_pc2epc}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_sys_pc2epc = _GEN_39[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_40 = {{uops_0_is_rocc}, {uops_14_is_rocc}, {uops_13_is_rocc}, {uops_12_is_rocc}, {uops_11_is_rocc}, {uops_10_is_rocc}, {uops_9_is_rocc}, {uops_8_is_rocc}, {uops_7_is_rocc}, {uops_6_is_rocc}, {uops_5_is_rocc}, {uops_4_is_rocc}, {uops_3_is_rocc}, {uops_2_is_rocc}, {uops_1_is_rocc}, {uops_0_is_rocc}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_rocc = _GEN_40[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_41 = {{uops_0_is_mov}, {uops_14_is_mov}, {uops_13_is_mov}, {uops_12_is_mov}, {uops_11_is_mov}, {uops_10_is_mov}, {uops_9_is_mov}, {uops_8_is_mov}, {uops_7_is_mov}, {uops_6_is_mov}, {uops_5_is_mov}, {uops_4_is_mov}, {uops_3_is_mov}, {uops_2_is_mov}, {uops_1_is_mov}, {uops_0_is_mov}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_mov = _GEN_41[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][3:0] _GEN_42 = {{uops_0_ftq_idx}, {uops_14_ftq_idx}, {uops_13_ftq_idx}, {uops_12_ftq_idx}, {uops_11_ftq_idx}, {uops_10_ftq_idx}, {uops_9_ftq_idx}, {uops_8_ftq_idx}, {uops_7_ftq_idx}, {uops_6_ftq_idx}, {uops_5_ftq_idx}, {uops_4_ftq_idx}, {uops_3_ftq_idx}, {uops_2_ftq_idx}, {uops_1_ftq_idx}, {uops_0_ftq_idx}}; // @[util.scala:505:22, :547:21]
assign out_uop_ftq_idx = _GEN_42[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_43 = {{uops_0_edge_inst}, {uops_14_edge_inst}, {uops_13_edge_inst}, {uops_12_edge_inst}, {uops_11_edge_inst}, {uops_10_edge_inst}, {uops_9_edge_inst}, {uops_8_edge_inst}, {uops_7_edge_inst}, {uops_6_edge_inst}, {uops_5_edge_inst}, {uops_4_edge_inst}, {uops_3_edge_inst}, {uops_2_edge_inst}, {uops_1_edge_inst}, {uops_0_edge_inst}}; // @[util.scala:505:22, :547:21]
assign out_uop_edge_inst = _GEN_43[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_44 = {{uops_0_pc_lob}, {uops_14_pc_lob}, {uops_13_pc_lob}, {uops_12_pc_lob}, {uops_11_pc_lob}, {uops_10_pc_lob}, {uops_9_pc_lob}, {uops_8_pc_lob}, {uops_7_pc_lob}, {uops_6_pc_lob}, {uops_5_pc_lob}, {uops_4_pc_lob}, {uops_3_pc_lob}, {uops_2_pc_lob}, {uops_1_pc_lob}, {uops_0_pc_lob}}; // @[util.scala:505:22, :547:21]
assign out_uop_pc_lob = _GEN_44[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_45 = {{uops_0_taken}, {uops_14_taken}, {uops_13_taken}, {uops_12_taken}, {uops_11_taken}, {uops_10_taken}, {uops_9_taken}, {uops_8_taken}, {uops_7_taken}, {uops_6_taken}, {uops_5_taken}, {uops_4_taken}, {uops_3_taken}, {uops_2_taken}, {uops_1_taken}, {uops_0_taken}}; // @[util.scala:505:22, :547:21]
assign out_uop_taken = _GEN_45[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_46 = {{uops_0_imm_rename}, {uops_14_imm_rename}, {uops_13_imm_rename}, {uops_12_imm_rename}, {uops_11_imm_rename}, {uops_10_imm_rename}, {uops_9_imm_rename}, {uops_8_imm_rename}, {uops_7_imm_rename}, {uops_6_imm_rename}, {uops_5_imm_rename}, {uops_4_imm_rename}, {uops_3_imm_rename}, {uops_2_imm_rename}, {uops_1_imm_rename}, {uops_0_imm_rename}}; // @[util.scala:505:22, :547:21]
assign out_uop_imm_rename = _GEN_46[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][2:0] _GEN_47 = {{uops_0_imm_sel}, {uops_14_imm_sel}, {uops_13_imm_sel}, {uops_12_imm_sel}, {uops_11_imm_sel}, {uops_10_imm_sel}, {uops_9_imm_sel}, {uops_8_imm_sel}, {uops_7_imm_sel}, {uops_6_imm_sel}, {uops_5_imm_sel}, {uops_4_imm_sel}, {uops_3_imm_sel}, {uops_2_imm_sel}, {uops_1_imm_sel}, {uops_0_imm_sel}}; // @[util.scala:505:22, :547:21]
assign out_uop_imm_sel = _GEN_47[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][4:0] _GEN_48 = {{uops_0_pimm}, {uops_14_pimm}, {uops_13_pimm}, {uops_12_pimm}, {uops_11_pimm}, {uops_10_pimm}, {uops_9_pimm}, {uops_8_pimm}, {uops_7_pimm}, {uops_6_pimm}, {uops_5_pimm}, {uops_4_pimm}, {uops_3_pimm}, {uops_2_pimm}, {uops_1_pimm}, {uops_0_pimm}}; // @[util.scala:505:22, :547:21]
assign out_uop_pimm = _GEN_48[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][19:0] _GEN_49 = {{uops_0_imm_packed}, {uops_14_imm_packed}, {uops_13_imm_packed}, {uops_12_imm_packed}, {uops_11_imm_packed}, {uops_10_imm_packed}, {uops_9_imm_packed}, {uops_8_imm_packed}, {uops_7_imm_packed}, {uops_6_imm_packed}, {uops_5_imm_packed}, {uops_4_imm_packed}, {uops_3_imm_packed}, {uops_2_imm_packed}, {uops_1_imm_packed}, {uops_0_imm_packed}}; // @[util.scala:505:22, :547:21]
assign out_uop_imm_packed = _GEN_49[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_50 = {{uops_0_op1_sel}, {uops_14_op1_sel}, {uops_13_op1_sel}, {uops_12_op1_sel}, {uops_11_op1_sel}, {uops_10_op1_sel}, {uops_9_op1_sel}, {uops_8_op1_sel}, {uops_7_op1_sel}, {uops_6_op1_sel}, {uops_5_op1_sel}, {uops_4_op1_sel}, {uops_3_op1_sel}, {uops_2_op1_sel}, {uops_1_op1_sel}, {uops_0_op1_sel}}; // @[util.scala:505:22, :547:21]
assign out_uop_op1_sel = _GEN_50[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][2:0] _GEN_51 = {{uops_0_op2_sel}, {uops_14_op2_sel}, {uops_13_op2_sel}, {uops_12_op2_sel}, {uops_11_op2_sel}, {uops_10_op2_sel}, {uops_9_op2_sel}, {uops_8_op2_sel}, {uops_7_op2_sel}, {uops_6_op2_sel}, {uops_5_op2_sel}, {uops_4_op2_sel}, {uops_3_op2_sel}, {uops_2_op2_sel}, {uops_1_op2_sel}, {uops_0_op2_sel}}; // @[util.scala:505:22, :547:21]
assign out_uop_op2_sel = _GEN_51[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_52 = {{uops_0_fp_ctrl_ldst}, {uops_14_fp_ctrl_ldst}, {uops_13_fp_ctrl_ldst}, {uops_12_fp_ctrl_ldst}, {uops_11_fp_ctrl_ldst}, {uops_10_fp_ctrl_ldst}, {uops_9_fp_ctrl_ldst}, {uops_8_fp_ctrl_ldst}, {uops_7_fp_ctrl_ldst}, {uops_6_fp_ctrl_ldst}, {uops_5_fp_ctrl_ldst}, {uops_4_fp_ctrl_ldst}, {uops_3_fp_ctrl_ldst}, {uops_2_fp_ctrl_ldst}, {uops_1_fp_ctrl_ldst}, {uops_0_fp_ctrl_ldst}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_ldst = _GEN_52[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_53 = {{uops_0_fp_ctrl_wen}, {uops_14_fp_ctrl_wen}, {uops_13_fp_ctrl_wen}, {uops_12_fp_ctrl_wen}, {uops_11_fp_ctrl_wen}, {uops_10_fp_ctrl_wen}, {uops_9_fp_ctrl_wen}, {uops_8_fp_ctrl_wen}, {uops_7_fp_ctrl_wen}, {uops_6_fp_ctrl_wen}, {uops_5_fp_ctrl_wen}, {uops_4_fp_ctrl_wen}, {uops_3_fp_ctrl_wen}, {uops_2_fp_ctrl_wen}, {uops_1_fp_ctrl_wen}, {uops_0_fp_ctrl_wen}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_wen = _GEN_53[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_54 = {{uops_0_fp_ctrl_ren1}, {uops_14_fp_ctrl_ren1}, {uops_13_fp_ctrl_ren1}, {uops_12_fp_ctrl_ren1}, {uops_11_fp_ctrl_ren1}, {uops_10_fp_ctrl_ren1}, {uops_9_fp_ctrl_ren1}, {uops_8_fp_ctrl_ren1}, {uops_7_fp_ctrl_ren1}, {uops_6_fp_ctrl_ren1}, {uops_5_fp_ctrl_ren1}, {uops_4_fp_ctrl_ren1}, {uops_3_fp_ctrl_ren1}, {uops_2_fp_ctrl_ren1}, {uops_1_fp_ctrl_ren1}, {uops_0_fp_ctrl_ren1}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_ren1 = _GEN_54[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_55 = {{uops_0_fp_ctrl_ren2}, {uops_14_fp_ctrl_ren2}, {uops_13_fp_ctrl_ren2}, {uops_12_fp_ctrl_ren2}, {uops_11_fp_ctrl_ren2}, {uops_10_fp_ctrl_ren2}, {uops_9_fp_ctrl_ren2}, {uops_8_fp_ctrl_ren2}, {uops_7_fp_ctrl_ren2}, {uops_6_fp_ctrl_ren2}, {uops_5_fp_ctrl_ren2}, {uops_4_fp_ctrl_ren2}, {uops_3_fp_ctrl_ren2}, {uops_2_fp_ctrl_ren2}, {uops_1_fp_ctrl_ren2}, {uops_0_fp_ctrl_ren2}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_ren2 = _GEN_55[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_56 = {{uops_0_fp_ctrl_ren3}, {uops_14_fp_ctrl_ren3}, {uops_13_fp_ctrl_ren3}, {uops_12_fp_ctrl_ren3}, {uops_11_fp_ctrl_ren3}, {uops_10_fp_ctrl_ren3}, {uops_9_fp_ctrl_ren3}, {uops_8_fp_ctrl_ren3}, {uops_7_fp_ctrl_ren3}, {uops_6_fp_ctrl_ren3}, {uops_5_fp_ctrl_ren3}, {uops_4_fp_ctrl_ren3}, {uops_3_fp_ctrl_ren3}, {uops_2_fp_ctrl_ren3}, {uops_1_fp_ctrl_ren3}, {uops_0_fp_ctrl_ren3}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_ren3 = _GEN_56[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_57 = {{uops_0_fp_ctrl_swap12}, {uops_14_fp_ctrl_swap12}, {uops_13_fp_ctrl_swap12}, {uops_12_fp_ctrl_swap12}, {uops_11_fp_ctrl_swap12}, {uops_10_fp_ctrl_swap12}, {uops_9_fp_ctrl_swap12}, {uops_8_fp_ctrl_swap12}, {uops_7_fp_ctrl_swap12}, {uops_6_fp_ctrl_swap12}, {uops_5_fp_ctrl_swap12}, {uops_4_fp_ctrl_swap12}, {uops_3_fp_ctrl_swap12}, {uops_2_fp_ctrl_swap12}, {uops_1_fp_ctrl_swap12}, {uops_0_fp_ctrl_swap12}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_swap12 = _GEN_57[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_58 = {{uops_0_fp_ctrl_swap23}, {uops_14_fp_ctrl_swap23}, {uops_13_fp_ctrl_swap23}, {uops_12_fp_ctrl_swap23}, {uops_11_fp_ctrl_swap23}, {uops_10_fp_ctrl_swap23}, {uops_9_fp_ctrl_swap23}, {uops_8_fp_ctrl_swap23}, {uops_7_fp_ctrl_swap23}, {uops_6_fp_ctrl_swap23}, {uops_5_fp_ctrl_swap23}, {uops_4_fp_ctrl_swap23}, {uops_3_fp_ctrl_swap23}, {uops_2_fp_ctrl_swap23}, {uops_1_fp_ctrl_swap23}, {uops_0_fp_ctrl_swap23}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_swap23 = _GEN_58[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_59 = {{uops_0_fp_ctrl_typeTagIn}, {uops_14_fp_ctrl_typeTagIn}, {uops_13_fp_ctrl_typeTagIn}, {uops_12_fp_ctrl_typeTagIn}, {uops_11_fp_ctrl_typeTagIn}, {uops_10_fp_ctrl_typeTagIn}, {uops_9_fp_ctrl_typeTagIn}, {uops_8_fp_ctrl_typeTagIn}, {uops_7_fp_ctrl_typeTagIn}, {uops_6_fp_ctrl_typeTagIn}, {uops_5_fp_ctrl_typeTagIn}, {uops_4_fp_ctrl_typeTagIn}, {uops_3_fp_ctrl_typeTagIn}, {uops_2_fp_ctrl_typeTagIn}, {uops_1_fp_ctrl_typeTagIn}, {uops_0_fp_ctrl_typeTagIn}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_typeTagIn = _GEN_59[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_60 = {{uops_0_fp_ctrl_typeTagOut}, {uops_14_fp_ctrl_typeTagOut}, {uops_13_fp_ctrl_typeTagOut}, {uops_12_fp_ctrl_typeTagOut}, {uops_11_fp_ctrl_typeTagOut}, {uops_10_fp_ctrl_typeTagOut}, {uops_9_fp_ctrl_typeTagOut}, {uops_8_fp_ctrl_typeTagOut}, {uops_7_fp_ctrl_typeTagOut}, {uops_6_fp_ctrl_typeTagOut}, {uops_5_fp_ctrl_typeTagOut}, {uops_4_fp_ctrl_typeTagOut}, {uops_3_fp_ctrl_typeTagOut}, {uops_2_fp_ctrl_typeTagOut}, {uops_1_fp_ctrl_typeTagOut}, {uops_0_fp_ctrl_typeTagOut}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_typeTagOut = _GEN_60[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_61 = {{uops_0_fp_ctrl_fromint}, {uops_14_fp_ctrl_fromint}, {uops_13_fp_ctrl_fromint}, {uops_12_fp_ctrl_fromint}, {uops_11_fp_ctrl_fromint}, {uops_10_fp_ctrl_fromint}, {uops_9_fp_ctrl_fromint}, {uops_8_fp_ctrl_fromint}, {uops_7_fp_ctrl_fromint}, {uops_6_fp_ctrl_fromint}, {uops_5_fp_ctrl_fromint}, {uops_4_fp_ctrl_fromint}, {uops_3_fp_ctrl_fromint}, {uops_2_fp_ctrl_fromint}, {uops_1_fp_ctrl_fromint}, {uops_0_fp_ctrl_fromint}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_fromint = _GEN_61[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_62 = {{uops_0_fp_ctrl_toint}, {uops_14_fp_ctrl_toint}, {uops_13_fp_ctrl_toint}, {uops_12_fp_ctrl_toint}, {uops_11_fp_ctrl_toint}, {uops_10_fp_ctrl_toint}, {uops_9_fp_ctrl_toint}, {uops_8_fp_ctrl_toint}, {uops_7_fp_ctrl_toint}, {uops_6_fp_ctrl_toint}, {uops_5_fp_ctrl_toint}, {uops_4_fp_ctrl_toint}, {uops_3_fp_ctrl_toint}, {uops_2_fp_ctrl_toint}, {uops_1_fp_ctrl_toint}, {uops_0_fp_ctrl_toint}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_toint = _GEN_62[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_63 = {{uops_0_fp_ctrl_fastpipe}, {uops_14_fp_ctrl_fastpipe}, {uops_13_fp_ctrl_fastpipe}, {uops_12_fp_ctrl_fastpipe}, {uops_11_fp_ctrl_fastpipe}, {uops_10_fp_ctrl_fastpipe}, {uops_9_fp_ctrl_fastpipe}, {uops_8_fp_ctrl_fastpipe}, {uops_7_fp_ctrl_fastpipe}, {uops_6_fp_ctrl_fastpipe}, {uops_5_fp_ctrl_fastpipe}, {uops_4_fp_ctrl_fastpipe}, {uops_3_fp_ctrl_fastpipe}, {uops_2_fp_ctrl_fastpipe}, {uops_1_fp_ctrl_fastpipe}, {uops_0_fp_ctrl_fastpipe}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_fastpipe = _GEN_63[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_64 = {{uops_0_fp_ctrl_fma}, {uops_14_fp_ctrl_fma}, {uops_13_fp_ctrl_fma}, {uops_12_fp_ctrl_fma}, {uops_11_fp_ctrl_fma}, {uops_10_fp_ctrl_fma}, {uops_9_fp_ctrl_fma}, {uops_8_fp_ctrl_fma}, {uops_7_fp_ctrl_fma}, {uops_6_fp_ctrl_fma}, {uops_5_fp_ctrl_fma}, {uops_4_fp_ctrl_fma}, {uops_3_fp_ctrl_fma}, {uops_2_fp_ctrl_fma}, {uops_1_fp_ctrl_fma}, {uops_0_fp_ctrl_fma}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_fma = _GEN_64[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_65 = {{uops_0_fp_ctrl_div}, {uops_14_fp_ctrl_div}, {uops_13_fp_ctrl_div}, {uops_12_fp_ctrl_div}, {uops_11_fp_ctrl_div}, {uops_10_fp_ctrl_div}, {uops_9_fp_ctrl_div}, {uops_8_fp_ctrl_div}, {uops_7_fp_ctrl_div}, {uops_6_fp_ctrl_div}, {uops_5_fp_ctrl_div}, {uops_4_fp_ctrl_div}, {uops_3_fp_ctrl_div}, {uops_2_fp_ctrl_div}, {uops_1_fp_ctrl_div}, {uops_0_fp_ctrl_div}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_div = _GEN_65[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_66 = {{uops_0_fp_ctrl_sqrt}, {uops_14_fp_ctrl_sqrt}, {uops_13_fp_ctrl_sqrt}, {uops_12_fp_ctrl_sqrt}, {uops_11_fp_ctrl_sqrt}, {uops_10_fp_ctrl_sqrt}, {uops_9_fp_ctrl_sqrt}, {uops_8_fp_ctrl_sqrt}, {uops_7_fp_ctrl_sqrt}, {uops_6_fp_ctrl_sqrt}, {uops_5_fp_ctrl_sqrt}, {uops_4_fp_ctrl_sqrt}, {uops_3_fp_ctrl_sqrt}, {uops_2_fp_ctrl_sqrt}, {uops_1_fp_ctrl_sqrt}, {uops_0_fp_ctrl_sqrt}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_sqrt = _GEN_66[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_67 = {{uops_0_fp_ctrl_wflags}, {uops_14_fp_ctrl_wflags}, {uops_13_fp_ctrl_wflags}, {uops_12_fp_ctrl_wflags}, {uops_11_fp_ctrl_wflags}, {uops_10_fp_ctrl_wflags}, {uops_9_fp_ctrl_wflags}, {uops_8_fp_ctrl_wflags}, {uops_7_fp_ctrl_wflags}, {uops_6_fp_ctrl_wflags}, {uops_5_fp_ctrl_wflags}, {uops_4_fp_ctrl_wflags}, {uops_3_fp_ctrl_wflags}, {uops_2_fp_ctrl_wflags}, {uops_1_fp_ctrl_wflags}, {uops_0_fp_ctrl_wflags}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_wflags = _GEN_67[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_68 = {{uops_0_fp_ctrl_vec}, {uops_14_fp_ctrl_vec}, {uops_13_fp_ctrl_vec}, {uops_12_fp_ctrl_vec}, {uops_11_fp_ctrl_vec}, {uops_10_fp_ctrl_vec}, {uops_9_fp_ctrl_vec}, {uops_8_fp_ctrl_vec}, {uops_7_fp_ctrl_vec}, {uops_6_fp_ctrl_vec}, {uops_5_fp_ctrl_vec}, {uops_4_fp_ctrl_vec}, {uops_3_fp_ctrl_vec}, {uops_2_fp_ctrl_vec}, {uops_1_fp_ctrl_vec}, {uops_0_fp_ctrl_vec}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_vec = _GEN_68[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][4:0] _GEN_69 = {{uops_0_rob_idx}, {uops_14_rob_idx}, {uops_13_rob_idx}, {uops_12_rob_idx}, {uops_11_rob_idx}, {uops_10_rob_idx}, {uops_9_rob_idx}, {uops_8_rob_idx}, {uops_7_rob_idx}, {uops_6_rob_idx}, {uops_5_rob_idx}, {uops_4_rob_idx}, {uops_3_rob_idx}, {uops_2_rob_idx}, {uops_1_rob_idx}, {uops_0_rob_idx}}; // @[util.scala:505:22, :547:21]
assign out_uop_rob_idx = _GEN_69[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][3:0] _GEN_70 = {{uops_0_ldq_idx}, {uops_14_ldq_idx}, {uops_13_ldq_idx}, {uops_12_ldq_idx}, {uops_11_ldq_idx}, {uops_10_ldq_idx}, {uops_9_ldq_idx}, {uops_8_ldq_idx}, {uops_7_ldq_idx}, {uops_6_ldq_idx}, {uops_5_ldq_idx}, {uops_4_ldq_idx}, {uops_3_ldq_idx}, {uops_2_ldq_idx}, {uops_1_ldq_idx}, {uops_0_ldq_idx}}; // @[util.scala:505:22, :547:21]
assign out_uop_ldq_idx = _GEN_70[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][3:0] _GEN_71 = {{uops_0_stq_idx}, {uops_14_stq_idx}, {uops_13_stq_idx}, {uops_12_stq_idx}, {uops_11_stq_idx}, {uops_10_stq_idx}, {uops_9_stq_idx}, {uops_8_stq_idx}, {uops_7_stq_idx}, {uops_6_stq_idx}, {uops_5_stq_idx}, {uops_4_stq_idx}, {uops_3_stq_idx}, {uops_2_stq_idx}, {uops_1_stq_idx}, {uops_0_stq_idx}}; // @[util.scala:505:22, :547:21]
assign out_uop_stq_idx = _GEN_71[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_72 = {{uops_0_rxq_idx}, {uops_14_rxq_idx}, {uops_13_rxq_idx}, {uops_12_rxq_idx}, {uops_11_rxq_idx}, {uops_10_rxq_idx}, {uops_9_rxq_idx}, {uops_8_rxq_idx}, {uops_7_rxq_idx}, {uops_6_rxq_idx}, {uops_5_rxq_idx}, {uops_4_rxq_idx}, {uops_3_rxq_idx}, {uops_2_rxq_idx}, {uops_1_rxq_idx}, {uops_0_rxq_idx}}; // @[util.scala:505:22, :547:21]
assign out_uop_rxq_idx = _GEN_72[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_73 = {{uops_0_pdst}, {uops_14_pdst}, {uops_13_pdst}, {uops_12_pdst}, {uops_11_pdst}, {uops_10_pdst}, {uops_9_pdst}, {uops_8_pdst}, {uops_7_pdst}, {uops_6_pdst}, {uops_5_pdst}, {uops_4_pdst}, {uops_3_pdst}, {uops_2_pdst}, {uops_1_pdst}, {uops_0_pdst}}; // @[util.scala:505:22, :547:21]
assign out_uop_pdst = _GEN_73[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_74 = {{uops_0_prs1}, {uops_14_prs1}, {uops_13_prs1}, {uops_12_prs1}, {uops_11_prs1}, {uops_10_prs1}, {uops_9_prs1}, {uops_8_prs1}, {uops_7_prs1}, {uops_6_prs1}, {uops_5_prs1}, {uops_4_prs1}, {uops_3_prs1}, {uops_2_prs1}, {uops_1_prs1}, {uops_0_prs1}}; // @[util.scala:505:22, :547:21]
assign out_uop_prs1 = _GEN_74[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_75 = {{uops_0_prs2}, {uops_14_prs2}, {uops_13_prs2}, {uops_12_prs2}, {uops_11_prs2}, {uops_10_prs2}, {uops_9_prs2}, {uops_8_prs2}, {uops_7_prs2}, {uops_6_prs2}, {uops_5_prs2}, {uops_4_prs2}, {uops_3_prs2}, {uops_2_prs2}, {uops_1_prs2}, {uops_0_prs2}}; // @[util.scala:505:22, :547:21]
assign out_uop_prs2 = _GEN_75[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_76 = {{uops_0_prs3}, {uops_14_prs3}, {uops_13_prs3}, {uops_12_prs3}, {uops_11_prs3}, {uops_10_prs3}, {uops_9_prs3}, {uops_8_prs3}, {uops_7_prs3}, {uops_6_prs3}, {uops_5_prs3}, {uops_4_prs3}, {uops_3_prs3}, {uops_2_prs3}, {uops_1_prs3}, {uops_0_prs3}}; // @[util.scala:505:22, :547:21]
assign out_uop_prs3 = _GEN_76[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][3:0] _GEN_77 = {{uops_0_ppred}, {uops_14_ppred}, {uops_13_ppred}, {uops_12_ppred}, {uops_11_ppred}, {uops_10_ppred}, {uops_9_ppred}, {uops_8_ppred}, {uops_7_ppred}, {uops_6_ppred}, {uops_5_ppred}, {uops_4_ppred}, {uops_3_ppred}, {uops_2_ppred}, {uops_1_ppred}, {uops_0_ppred}}; // @[util.scala:505:22, :547:21]
assign out_uop_ppred = _GEN_77[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_78 = {{uops_0_prs1_busy}, {uops_14_prs1_busy}, {uops_13_prs1_busy}, {uops_12_prs1_busy}, {uops_11_prs1_busy}, {uops_10_prs1_busy}, {uops_9_prs1_busy}, {uops_8_prs1_busy}, {uops_7_prs1_busy}, {uops_6_prs1_busy}, {uops_5_prs1_busy}, {uops_4_prs1_busy}, {uops_3_prs1_busy}, {uops_2_prs1_busy}, {uops_1_prs1_busy}, {uops_0_prs1_busy}}; // @[util.scala:505:22, :547:21]
assign out_uop_prs1_busy = _GEN_78[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_79 = {{uops_0_prs2_busy}, {uops_14_prs2_busy}, {uops_13_prs2_busy}, {uops_12_prs2_busy}, {uops_11_prs2_busy}, {uops_10_prs2_busy}, {uops_9_prs2_busy}, {uops_8_prs2_busy}, {uops_7_prs2_busy}, {uops_6_prs2_busy}, {uops_5_prs2_busy}, {uops_4_prs2_busy}, {uops_3_prs2_busy}, {uops_2_prs2_busy}, {uops_1_prs2_busy}, {uops_0_prs2_busy}}; // @[util.scala:505:22, :547:21]
assign out_uop_prs2_busy = _GEN_79[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_80 = {{uops_0_prs3_busy}, {uops_14_prs3_busy}, {uops_13_prs3_busy}, {uops_12_prs3_busy}, {uops_11_prs3_busy}, {uops_10_prs3_busy}, {uops_9_prs3_busy}, {uops_8_prs3_busy}, {uops_7_prs3_busy}, {uops_6_prs3_busy}, {uops_5_prs3_busy}, {uops_4_prs3_busy}, {uops_3_prs3_busy}, {uops_2_prs3_busy}, {uops_1_prs3_busy}, {uops_0_prs3_busy}}; // @[util.scala:505:22, :547:21]
assign out_uop_prs3_busy = _GEN_80[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_81 = {{uops_0_ppred_busy}, {uops_14_ppred_busy}, {uops_13_ppred_busy}, {uops_12_ppred_busy}, {uops_11_ppred_busy}, {uops_10_ppred_busy}, {uops_9_ppred_busy}, {uops_8_ppred_busy}, {uops_7_ppred_busy}, {uops_6_ppred_busy}, {uops_5_ppred_busy}, {uops_4_ppred_busy}, {uops_3_ppred_busy}, {uops_2_ppred_busy}, {uops_1_ppred_busy}, {uops_0_ppred_busy}}; // @[util.scala:505:22, :547:21]
assign out_uop_ppred_busy = _GEN_81[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_82 = {{uops_0_stale_pdst}, {uops_14_stale_pdst}, {uops_13_stale_pdst}, {uops_12_stale_pdst}, {uops_11_stale_pdst}, {uops_10_stale_pdst}, {uops_9_stale_pdst}, {uops_8_stale_pdst}, {uops_7_stale_pdst}, {uops_6_stale_pdst}, {uops_5_stale_pdst}, {uops_4_stale_pdst}, {uops_3_stale_pdst}, {uops_2_stale_pdst}, {uops_1_stale_pdst}, {uops_0_stale_pdst}}; // @[util.scala:505:22, :547:21]
assign out_uop_stale_pdst = _GEN_82[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_83 = {{uops_0_exception}, {uops_14_exception}, {uops_13_exception}, {uops_12_exception}, {uops_11_exception}, {uops_10_exception}, {uops_9_exception}, {uops_8_exception}, {uops_7_exception}, {uops_6_exception}, {uops_5_exception}, {uops_4_exception}, {uops_3_exception}, {uops_2_exception}, {uops_1_exception}, {uops_0_exception}}; // @[util.scala:505:22, :547:21]
assign out_uop_exception = _GEN_83[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][63:0] _GEN_84 = {{uops_0_exc_cause}, {uops_14_exc_cause}, {uops_13_exc_cause}, {uops_12_exc_cause}, {uops_11_exc_cause}, {uops_10_exc_cause}, {uops_9_exc_cause}, {uops_8_exc_cause}, {uops_7_exc_cause}, {uops_6_exc_cause}, {uops_5_exc_cause}, {uops_4_exc_cause}, {uops_3_exc_cause}, {uops_2_exc_cause}, {uops_1_exc_cause}, {uops_0_exc_cause}}; // @[util.scala:505:22, :547:21]
assign out_uop_exc_cause = _GEN_84[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][4:0] _GEN_85 = {{uops_0_mem_cmd}, {uops_14_mem_cmd}, {uops_13_mem_cmd}, {uops_12_mem_cmd}, {uops_11_mem_cmd}, {uops_10_mem_cmd}, {uops_9_mem_cmd}, {uops_8_mem_cmd}, {uops_7_mem_cmd}, {uops_6_mem_cmd}, {uops_5_mem_cmd}, {uops_4_mem_cmd}, {uops_3_mem_cmd}, {uops_2_mem_cmd}, {uops_1_mem_cmd}, {uops_0_mem_cmd}}; // @[util.scala:505:22, :547:21]
assign out_uop_mem_cmd = _GEN_85[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_86 = {{uops_0_mem_size}, {uops_14_mem_size}, {uops_13_mem_size}, {uops_12_mem_size}, {uops_11_mem_size}, {uops_10_mem_size}, {uops_9_mem_size}, {uops_8_mem_size}, {uops_7_mem_size}, {uops_6_mem_size}, {uops_5_mem_size}, {uops_4_mem_size}, {uops_3_mem_size}, {uops_2_mem_size}, {uops_1_mem_size}, {uops_0_mem_size}}; // @[util.scala:505:22, :547:21]
assign out_uop_mem_size = _GEN_86[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_87 = {{uops_0_mem_signed}, {uops_14_mem_signed}, {uops_13_mem_signed}, {uops_12_mem_signed}, {uops_11_mem_signed}, {uops_10_mem_signed}, {uops_9_mem_signed}, {uops_8_mem_signed}, {uops_7_mem_signed}, {uops_6_mem_signed}, {uops_5_mem_signed}, {uops_4_mem_signed}, {uops_3_mem_signed}, {uops_2_mem_signed}, {uops_1_mem_signed}, {uops_0_mem_signed}}; // @[util.scala:505:22, :547:21]
assign out_uop_mem_signed = _GEN_87[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_88 = {{uops_0_uses_ldq}, {uops_14_uses_ldq}, {uops_13_uses_ldq}, {uops_12_uses_ldq}, {uops_11_uses_ldq}, {uops_10_uses_ldq}, {uops_9_uses_ldq}, {uops_8_uses_ldq}, {uops_7_uses_ldq}, {uops_6_uses_ldq}, {uops_5_uses_ldq}, {uops_4_uses_ldq}, {uops_3_uses_ldq}, {uops_2_uses_ldq}, {uops_1_uses_ldq}, {uops_0_uses_ldq}}; // @[util.scala:505:22, :547:21]
assign out_uop_uses_ldq = _GEN_88[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_89 = {{uops_0_uses_stq}, {uops_14_uses_stq}, {uops_13_uses_stq}, {uops_12_uses_stq}, {uops_11_uses_stq}, {uops_10_uses_stq}, {uops_9_uses_stq}, {uops_8_uses_stq}, {uops_7_uses_stq}, {uops_6_uses_stq}, {uops_5_uses_stq}, {uops_4_uses_stq}, {uops_3_uses_stq}, {uops_2_uses_stq}, {uops_1_uses_stq}, {uops_0_uses_stq}}; // @[util.scala:505:22, :547:21]
assign out_uop_uses_stq = _GEN_89[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_90 = {{uops_0_is_unique}, {uops_14_is_unique}, {uops_13_is_unique}, {uops_12_is_unique}, {uops_11_is_unique}, {uops_10_is_unique}, {uops_9_is_unique}, {uops_8_is_unique}, {uops_7_is_unique}, {uops_6_is_unique}, {uops_5_is_unique}, {uops_4_is_unique}, {uops_3_is_unique}, {uops_2_is_unique}, {uops_1_is_unique}, {uops_0_is_unique}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_unique = _GEN_90[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_91 = {{uops_0_flush_on_commit}, {uops_14_flush_on_commit}, {uops_13_flush_on_commit}, {uops_12_flush_on_commit}, {uops_11_flush_on_commit}, {uops_10_flush_on_commit}, {uops_9_flush_on_commit}, {uops_8_flush_on_commit}, {uops_7_flush_on_commit}, {uops_6_flush_on_commit}, {uops_5_flush_on_commit}, {uops_4_flush_on_commit}, {uops_3_flush_on_commit}, {uops_2_flush_on_commit}, {uops_1_flush_on_commit}, {uops_0_flush_on_commit}}; // @[util.scala:505:22, :547:21]
assign out_uop_flush_on_commit = _GEN_91[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][2:0] _GEN_92 = {{uops_0_csr_cmd}, {uops_14_csr_cmd}, {uops_13_csr_cmd}, {uops_12_csr_cmd}, {uops_11_csr_cmd}, {uops_10_csr_cmd}, {uops_9_csr_cmd}, {uops_8_csr_cmd}, {uops_7_csr_cmd}, {uops_6_csr_cmd}, {uops_5_csr_cmd}, {uops_4_csr_cmd}, {uops_3_csr_cmd}, {uops_2_csr_cmd}, {uops_1_csr_cmd}, {uops_0_csr_cmd}}; // @[util.scala:505:22, :547:21]
assign out_uop_csr_cmd = _GEN_92[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_93 = {{uops_0_ldst_is_rs1}, {uops_14_ldst_is_rs1}, {uops_13_ldst_is_rs1}, {uops_12_ldst_is_rs1}, {uops_11_ldst_is_rs1}, {uops_10_ldst_is_rs1}, {uops_9_ldst_is_rs1}, {uops_8_ldst_is_rs1}, {uops_7_ldst_is_rs1}, {uops_6_ldst_is_rs1}, {uops_5_ldst_is_rs1}, {uops_4_ldst_is_rs1}, {uops_3_ldst_is_rs1}, {uops_2_ldst_is_rs1}, {uops_1_ldst_is_rs1}, {uops_0_ldst_is_rs1}}; // @[util.scala:505:22, :547:21]
assign out_uop_ldst_is_rs1 = _GEN_93[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_94 = {{uops_0_ldst}, {uops_14_ldst}, {uops_13_ldst}, {uops_12_ldst}, {uops_11_ldst}, {uops_10_ldst}, {uops_9_ldst}, {uops_8_ldst}, {uops_7_ldst}, {uops_6_ldst}, {uops_5_ldst}, {uops_4_ldst}, {uops_3_ldst}, {uops_2_ldst}, {uops_1_ldst}, {uops_0_ldst}}; // @[util.scala:505:22, :547:21]
assign out_uop_ldst = _GEN_94[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_95 = {{uops_0_lrs1}, {uops_14_lrs1}, {uops_13_lrs1}, {uops_12_lrs1}, {uops_11_lrs1}, {uops_10_lrs1}, {uops_9_lrs1}, {uops_8_lrs1}, {uops_7_lrs1}, {uops_6_lrs1}, {uops_5_lrs1}, {uops_4_lrs1}, {uops_3_lrs1}, {uops_2_lrs1}, {uops_1_lrs1}, {uops_0_lrs1}}; // @[util.scala:505:22, :547:21]
assign out_uop_lrs1 = _GEN_95[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_96 = {{uops_0_lrs2}, {uops_14_lrs2}, {uops_13_lrs2}, {uops_12_lrs2}, {uops_11_lrs2}, {uops_10_lrs2}, {uops_9_lrs2}, {uops_8_lrs2}, {uops_7_lrs2}, {uops_6_lrs2}, {uops_5_lrs2}, {uops_4_lrs2}, {uops_3_lrs2}, {uops_2_lrs2}, {uops_1_lrs2}, {uops_0_lrs2}}; // @[util.scala:505:22, :547:21]
assign out_uop_lrs2 = _GEN_96[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_97 = {{uops_0_lrs3}, {uops_14_lrs3}, {uops_13_lrs3}, {uops_12_lrs3}, {uops_11_lrs3}, {uops_10_lrs3}, {uops_9_lrs3}, {uops_8_lrs3}, {uops_7_lrs3}, {uops_6_lrs3}, {uops_5_lrs3}, {uops_4_lrs3}, {uops_3_lrs3}, {uops_2_lrs3}, {uops_1_lrs3}, {uops_0_lrs3}}; // @[util.scala:505:22, :547:21]
assign out_uop_lrs3 = _GEN_97[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_98 = {{uops_0_dst_rtype}, {uops_14_dst_rtype}, {uops_13_dst_rtype}, {uops_12_dst_rtype}, {uops_11_dst_rtype}, {uops_10_dst_rtype}, {uops_9_dst_rtype}, {uops_8_dst_rtype}, {uops_7_dst_rtype}, {uops_6_dst_rtype}, {uops_5_dst_rtype}, {uops_4_dst_rtype}, {uops_3_dst_rtype}, {uops_2_dst_rtype}, {uops_1_dst_rtype}, {uops_0_dst_rtype}}; // @[util.scala:505:22, :547:21]
assign out_uop_dst_rtype = _GEN_98[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_99 = {{uops_0_lrs1_rtype}, {uops_14_lrs1_rtype}, {uops_13_lrs1_rtype}, {uops_12_lrs1_rtype}, {uops_11_lrs1_rtype}, {uops_10_lrs1_rtype}, {uops_9_lrs1_rtype}, {uops_8_lrs1_rtype}, {uops_7_lrs1_rtype}, {uops_6_lrs1_rtype}, {uops_5_lrs1_rtype}, {uops_4_lrs1_rtype}, {uops_3_lrs1_rtype}, {uops_2_lrs1_rtype}, {uops_1_lrs1_rtype}, {uops_0_lrs1_rtype}}; // @[util.scala:505:22, :547:21]
assign out_uop_lrs1_rtype = _GEN_99[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_100 = {{uops_0_lrs2_rtype}, {uops_14_lrs2_rtype}, {uops_13_lrs2_rtype}, {uops_12_lrs2_rtype}, {uops_11_lrs2_rtype}, {uops_10_lrs2_rtype}, {uops_9_lrs2_rtype}, {uops_8_lrs2_rtype}, {uops_7_lrs2_rtype}, {uops_6_lrs2_rtype}, {uops_5_lrs2_rtype}, {uops_4_lrs2_rtype}, {uops_3_lrs2_rtype}, {uops_2_lrs2_rtype}, {uops_1_lrs2_rtype}, {uops_0_lrs2_rtype}}; // @[util.scala:505:22, :547:21]
assign out_uop_lrs2_rtype = _GEN_100[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_101 = {{uops_0_frs3_en}, {uops_14_frs3_en}, {uops_13_frs3_en}, {uops_12_frs3_en}, {uops_11_frs3_en}, {uops_10_frs3_en}, {uops_9_frs3_en}, {uops_8_frs3_en}, {uops_7_frs3_en}, {uops_6_frs3_en}, {uops_5_frs3_en}, {uops_4_frs3_en}, {uops_3_frs3_en}, {uops_2_frs3_en}, {uops_1_frs3_en}, {uops_0_frs3_en}}; // @[util.scala:505:22, :547:21]
assign out_uop_frs3_en = _GEN_101[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_102 = {{uops_0_fcn_dw}, {uops_14_fcn_dw}, {uops_13_fcn_dw}, {uops_12_fcn_dw}, {uops_11_fcn_dw}, {uops_10_fcn_dw}, {uops_9_fcn_dw}, {uops_8_fcn_dw}, {uops_7_fcn_dw}, {uops_6_fcn_dw}, {uops_5_fcn_dw}, {uops_4_fcn_dw}, {uops_3_fcn_dw}, {uops_2_fcn_dw}, {uops_1_fcn_dw}, {uops_0_fcn_dw}}; // @[util.scala:505:22, :547:21]
assign out_uop_fcn_dw = _GEN_102[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][4:0] _GEN_103 = {{uops_0_fcn_op}, {uops_14_fcn_op}, {uops_13_fcn_op}, {uops_12_fcn_op}, {uops_11_fcn_op}, {uops_10_fcn_op}, {uops_9_fcn_op}, {uops_8_fcn_op}, {uops_7_fcn_op}, {uops_6_fcn_op}, {uops_5_fcn_op}, {uops_4_fcn_op}, {uops_3_fcn_op}, {uops_2_fcn_op}, {uops_1_fcn_op}, {uops_0_fcn_op}}; // @[util.scala:505:22, :547:21]
assign out_uop_fcn_op = _GEN_103[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_104 = {{uops_0_fp_val}, {uops_14_fp_val}, {uops_13_fp_val}, {uops_12_fp_val}, {uops_11_fp_val}, {uops_10_fp_val}, {uops_9_fp_val}, {uops_8_fp_val}, {uops_7_fp_val}, {uops_6_fp_val}, {uops_5_fp_val}, {uops_4_fp_val}, {uops_3_fp_val}, {uops_2_fp_val}, {uops_1_fp_val}, {uops_0_fp_val}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_val = _GEN_104[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][2:0] _GEN_105 = {{uops_0_fp_rm}, {uops_14_fp_rm}, {uops_13_fp_rm}, {uops_12_fp_rm}, {uops_11_fp_rm}, {uops_10_fp_rm}, {uops_9_fp_rm}, {uops_8_fp_rm}, {uops_7_fp_rm}, {uops_6_fp_rm}, {uops_5_fp_rm}, {uops_4_fp_rm}, {uops_3_fp_rm}, {uops_2_fp_rm}, {uops_1_fp_rm}, {uops_0_fp_rm}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_rm = _GEN_105[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_106 = {{uops_0_fp_typ}, {uops_14_fp_typ}, {uops_13_fp_typ}, {uops_12_fp_typ}, {uops_11_fp_typ}, {uops_10_fp_typ}, {uops_9_fp_typ}, {uops_8_fp_typ}, {uops_7_fp_typ}, {uops_6_fp_typ}, {uops_5_fp_typ}, {uops_4_fp_typ}, {uops_3_fp_typ}, {uops_2_fp_typ}, {uops_1_fp_typ}, {uops_0_fp_typ}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_typ = _GEN_106[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_107 = {{uops_0_xcpt_pf_if}, {uops_14_xcpt_pf_if}, {uops_13_xcpt_pf_if}, {uops_12_xcpt_pf_if}, {uops_11_xcpt_pf_if}, {uops_10_xcpt_pf_if}, {uops_9_xcpt_pf_if}, {uops_8_xcpt_pf_if}, {uops_7_xcpt_pf_if}, {uops_6_xcpt_pf_if}, {uops_5_xcpt_pf_if}, {uops_4_xcpt_pf_if}, {uops_3_xcpt_pf_if}, {uops_2_xcpt_pf_if}, {uops_1_xcpt_pf_if}, {uops_0_xcpt_pf_if}}; // @[util.scala:505:22, :547:21]
assign out_uop_xcpt_pf_if = _GEN_107[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_108 = {{uops_0_xcpt_ae_if}, {uops_14_xcpt_ae_if}, {uops_13_xcpt_ae_if}, {uops_12_xcpt_ae_if}, {uops_11_xcpt_ae_if}, {uops_10_xcpt_ae_if}, {uops_9_xcpt_ae_if}, {uops_8_xcpt_ae_if}, {uops_7_xcpt_ae_if}, {uops_6_xcpt_ae_if}, {uops_5_xcpt_ae_if}, {uops_4_xcpt_ae_if}, {uops_3_xcpt_ae_if}, {uops_2_xcpt_ae_if}, {uops_1_xcpt_ae_if}, {uops_0_xcpt_ae_if}}; // @[util.scala:505:22, :547:21]
assign out_uop_xcpt_ae_if = _GEN_108[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_109 = {{uops_0_xcpt_ma_if}, {uops_14_xcpt_ma_if}, {uops_13_xcpt_ma_if}, {uops_12_xcpt_ma_if}, {uops_11_xcpt_ma_if}, {uops_10_xcpt_ma_if}, {uops_9_xcpt_ma_if}, {uops_8_xcpt_ma_if}, {uops_7_xcpt_ma_if}, {uops_6_xcpt_ma_if}, {uops_5_xcpt_ma_if}, {uops_4_xcpt_ma_if}, {uops_3_xcpt_ma_if}, {uops_2_xcpt_ma_if}, {uops_1_xcpt_ma_if}, {uops_0_xcpt_ma_if}}; // @[util.scala:505:22, :547:21]
assign out_uop_xcpt_ma_if = _GEN_109[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_110 = {{uops_0_bp_debug_if}, {uops_14_bp_debug_if}, {uops_13_bp_debug_if}, {uops_12_bp_debug_if}, {uops_11_bp_debug_if}, {uops_10_bp_debug_if}, {uops_9_bp_debug_if}, {uops_8_bp_debug_if}, {uops_7_bp_debug_if}, {uops_6_bp_debug_if}, {uops_5_bp_debug_if}, {uops_4_bp_debug_if}, {uops_3_bp_debug_if}, {uops_2_bp_debug_if}, {uops_1_bp_debug_if}, {uops_0_bp_debug_if}}; // @[util.scala:505:22, :547:21]
assign out_uop_bp_debug_if = _GEN_110[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_111 = {{uops_0_bp_xcpt_if}, {uops_14_bp_xcpt_if}, {uops_13_bp_xcpt_if}, {uops_12_bp_xcpt_if}, {uops_11_bp_xcpt_if}, {uops_10_bp_xcpt_if}, {uops_9_bp_xcpt_if}, {uops_8_bp_xcpt_if}, {uops_7_bp_xcpt_if}, {uops_6_bp_xcpt_if}, {uops_5_bp_xcpt_if}, {uops_4_bp_xcpt_if}, {uops_3_bp_xcpt_if}, {uops_2_bp_xcpt_if}, {uops_1_bp_xcpt_if}, {uops_0_bp_xcpt_if}}; // @[util.scala:505:22, :547:21]
assign out_uop_bp_xcpt_if = _GEN_111[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][2:0] _GEN_112 = {{uops_0_debug_fsrc}, {uops_14_debug_fsrc}, {uops_13_debug_fsrc}, {uops_12_debug_fsrc}, {uops_11_debug_fsrc}, {uops_10_debug_fsrc}, {uops_9_debug_fsrc}, {uops_8_debug_fsrc}, {uops_7_debug_fsrc}, {uops_6_debug_fsrc}, {uops_5_debug_fsrc}, {uops_4_debug_fsrc}, {uops_3_debug_fsrc}, {uops_2_debug_fsrc}, {uops_1_debug_fsrc}, {uops_0_debug_fsrc}}; // @[util.scala:505:22, :547:21]
assign out_uop_debug_fsrc = _GEN_112[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][2:0] _GEN_113 = {{uops_0_debug_tsrc}, {uops_14_debug_tsrc}, {uops_13_debug_tsrc}, {uops_12_debug_tsrc}, {uops_11_debug_tsrc}, {uops_10_debug_tsrc}, {uops_9_debug_tsrc}, {uops_8_debug_tsrc}, {uops_7_debug_tsrc}, {uops_6_debug_tsrc}, {uops_5_debug_tsrc}, {uops_4_debug_tsrc}, {uops_3_debug_tsrc}, {uops_2_debug_tsrc}, {uops_1_debug_tsrc}, {uops_0_debug_tsrc}}; // @[util.scala:505:22, :547:21]
assign out_uop_debug_tsrc = _GEN_113[deq_ptr_value]; // @[Counter.scala:61:40]
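  // Dequeue-side handshake and occupancy: io_deq_valid is asserted only when the
  // queue is non-empty, further qualified by _GEN_0 (computed earlier for the head
  // entry). io_count is the enq/deq pointer difference, corrected for wrap-around
  // when deq_ptr_value > enq_ptr_value and saturating to the full depth when the
  // pointers match and maybe_full is set.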
wire _io_deq_valid_T = ~io_empty_0; // @[util.scala:458:7, :515:71, :548:32]
assign _io_deq_valid_T_1 = _io_deq_valid_T & _GEN_0; // @[util.scala:515:44, :548:{32,42}]
assign io_deq_valid_0 = _io_deq_valid_T_1; // @[util.scala:458:7, :548:42]
wire [4:0] _ptr_diff_T = _GEN_1 - _GEN_2; // @[Counter.scala:77:24]
wire [3:0] ptr_diff = _ptr_diff_T[3:0]; // @[util.scala:551:34]
wire [3:0] _io_count_T = {4{maybe_full}}; // @[util.scala:509:29, :557:12]
wire _io_count_T_1 = deq_ptr_value > enq_ptr_value; // @[Counter.scala:61:40]
wire [4:0] _io_count_T_2 = {1'h0, ptr_diff} + 5'hF; // @[util.scala:551:34, :560:26]
wire [3:0] _io_count_T_3 = _io_count_T_2[3:0]; // @[util.scala:560:26]
wire [3:0] _io_count_T_4 = _io_count_T_1 ? _io_count_T_3 : ptr_diff; // @[util.scala:551:34, :559:{12,27}, :560:26]
assign _io_count_T_5 = ptr_match ? _io_count_T : _io_count_T_4; // @[util.scala:511:35, :556:22, :557:12, :559:12]
assign io_count_0 = _io_count_T_5; // @[util.scala:458:7, :556:22]
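  // Enqueue-side write enables: decode enq_ptr_value into a one-hot per-entry
  // select (_GEN_114 .. _GEN_141 for entries 0..13, _GEN_142 via wrap for the last
  // entry), each gated by do_enq. These strobes drive the per-slot capture logic
  // in the always block below.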
wire _GEN_114 = enq_ptr_value == 4'h0; // @[Counter.scala:61:40]
wire _GEN_115 = do_enq & _GEN_114; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_116 = enq_ptr_value == 4'h1; // @[Counter.scala:61:40]
wire _GEN_117 = do_enq & _GEN_116; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_118 = enq_ptr_value == 4'h2; // @[Counter.scala:61:40]
wire _GEN_119 = do_enq & _GEN_118; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_120 = enq_ptr_value == 4'h3; // @[Counter.scala:61:40]
wire _GEN_121 = do_enq & _GEN_120; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_122 = enq_ptr_value == 4'h4; // @[Counter.scala:61:40]
wire _GEN_123 = do_enq & _GEN_122; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_124 = enq_ptr_value == 4'h5; // @[Counter.scala:61:40]
wire _GEN_125 = do_enq & _GEN_124; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_126 = enq_ptr_value == 4'h6; // @[Counter.scala:61:40]
wire _GEN_127 = do_enq & _GEN_126; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_128 = enq_ptr_value == 4'h7; // @[Counter.scala:61:40]
wire _GEN_129 = do_enq & _GEN_128; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_130 = enq_ptr_value == 4'h8; // @[Counter.scala:61:40]
wire _GEN_131 = do_enq & _GEN_130; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_132 = enq_ptr_value == 4'h9; // @[Counter.scala:61:40]
wire _GEN_133 = do_enq & _GEN_132; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_134 = enq_ptr_value == 4'hA; // @[Counter.scala:61:40]
wire _GEN_135 = do_enq & _GEN_134; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_136 = enq_ptr_value == 4'hB; // @[Counter.scala:61:40]
wire _GEN_137 = do_enq & _GEN_136; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_138 = enq_ptr_value == 4'hC; // @[Counter.scala:61:40]
wire _GEN_139 = do_enq & _GEN_138; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_140 = enq_ptr_value == 4'hD; // @[Counter.scala:61:40]
wire _GEN_141 = do_enq & _GEN_140; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_142 = do_enq & wrap; // @[Counter.scala:73:24]
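  // Sequential state: on reset, all valid bits, both pointers, and maybe_full go to
  // the empty state. Otherwise each valid bit is cleared when its slot is dequeued,
  // set when its slot is enqueued, and otherwise takes the previously-computed hold
  // term (_valids_*_T_7). The pointers advance modulo the queue depth on enqueue and
  // dequeue, and maybe_full tracks whichever of the two happened last.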
always @(posedge clock) begin // @[util.scala:458:7]
if (reset) begin // @[util.scala:458:7]
valids_0 <= 1'h0; // @[util.scala:504:26]
valids_1 <= 1'h0; // @[util.scala:504:26]
valids_2 <= 1'h0; // @[util.scala:504:26]
valids_3 <= 1'h0; // @[util.scala:504:26]
valids_4 <= 1'h0; // @[util.scala:504:26]
valids_5 <= 1'h0; // @[util.scala:504:26]
valids_6 <= 1'h0; // @[util.scala:504:26]
valids_7 <= 1'h0; // @[util.scala:504:26]
valids_8 <= 1'h0; // @[util.scala:504:26]
valids_9 <= 1'h0; // @[util.scala:504:26]
valids_10 <= 1'h0; // @[util.scala:504:26]
valids_11 <= 1'h0; // @[util.scala:504:26]
valids_12 <= 1'h0; // @[util.scala:504:26]
valids_13 <= 1'h0; // @[util.scala:504:26]
valids_14 <= 1'h0; // @[util.scala:504:26]
enq_ptr_value <= 4'h0; // @[Counter.scala:61:40]
deq_ptr_value <= 4'h0; // @[Counter.scala:61:40]
maybe_full <= 1'h0; // @[util.scala:509:29]
end
else begin // @[util.scala:458:7]
valids_0 <= ~(do_deq & deq_ptr_value == 4'h0) & (_GEN_115 | _valids_0_T_7); // @[Counter.scala:61:40]
valids_1 <= ~(do_deq & deq_ptr_value == 4'h1) & (_GEN_117 | _valids_1_T_7); // @[Counter.scala:61:40]
valids_2 <= ~(do_deq & deq_ptr_value == 4'h2) & (_GEN_119 | _valids_2_T_7); // @[Counter.scala:61:40]
valids_3 <= ~(do_deq & deq_ptr_value == 4'h3) & (_GEN_121 | _valids_3_T_7); // @[Counter.scala:61:40]
valids_4 <= ~(do_deq & deq_ptr_value == 4'h4) & (_GEN_123 | _valids_4_T_7); // @[Counter.scala:61:40]
valids_5 <= ~(do_deq & deq_ptr_value == 4'h5) & (_GEN_125 | _valids_5_T_7); // @[Counter.scala:61:40]
valids_6 <= ~(do_deq & deq_ptr_value == 4'h6) & (_GEN_127 | _valids_6_T_7); // @[Counter.scala:61:40]
valids_7 <= ~(do_deq & deq_ptr_value == 4'h7) & (_GEN_129 | _valids_7_T_7); // @[Counter.scala:61:40]
valids_8 <= ~(do_deq & deq_ptr_value == 4'h8) & (_GEN_131 | _valids_8_T_7); // @[Counter.scala:61:40]
valids_9 <= ~(do_deq & deq_ptr_value == 4'h9) & (_GEN_133 | _valids_9_T_7); // @[Counter.scala:61:40]
valids_10 <= ~(do_deq & deq_ptr_value == 4'hA) & (_GEN_135 | _valids_10_T_7); // @[Counter.scala:61:40]
valids_11 <= ~(do_deq & deq_ptr_value == 4'hB) & (_GEN_137 | _valids_11_T_7); // @[Counter.scala:61:40]
valids_12 <= ~(do_deq & deq_ptr_value == 4'hC) & (_GEN_139 | _valids_12_T_7); // @[Counter.scala:61:40]
valids_13 <= ~(do_deq & deq_ptr_value == 4'hD) & (_GEN_141 | _valids_13_T_7); // @[Counter.scala:61:40]
valids_14 <= ~(do_deq & wrap_1) & (_GEN_142 | _valids_14_T_7); // @[Counter.scala:73:24]
if (do_enq) // @[util.scala:514:26]
enq_ptr_value <= wrap ? 4'h0 : _value_T_1; // @[Counter.scala:61:40, :73:24, :77:{15,24}, :87:{20,28}]
if (do_deq) // @[util.scala:515:26]
deq_ptr_value <= wrap_1 ? 4'h0 : _value_T_3; // @[Counter.scala:61:40, :73:24, :77:{15,24}, :87:{20,28}]
if (~(do_enq == do_deq)) // @[util.scala:509:29, :514:26, :515:26, :539:{18,30}, :540:18]
maybe_full <= do_enq; // @[util.scala:509:29, :514:26]
end
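  // Per-slot micro-op capture: when a slot's enqueue select fires, every field of
  // io_enq_bits_uop is latched into that slot. br_mask is written separately below
  // so that, while the entry remains valid, it can keep being updated (masked) by
  // the branch-resolution path instead of holding the value captured at enqueue.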
if (_GEN_115) begin // @[util.scala:520:18, :526:19, :528:35]
uops_0_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_0_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_0_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_0_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_0_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_0_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_0_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_0_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_0_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_0_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_0_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_0_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_0_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_0_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_0_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_0_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_0_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_0_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_0_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_0_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_0_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_0_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_0_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_0_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_0_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_0_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_0_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_0_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_0_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_0_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_0_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_0_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_0_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_0_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_0_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_0_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_0_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_0_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_0_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_0_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_0_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_0_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_0_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_0_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_0_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_0_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_0_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_0_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_0_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_0_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_0_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_0_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_0_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_0_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_0_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_0_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_0_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_0_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_0_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_0_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_0_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_0_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_0_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_0_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_0_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_0_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_0_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_0_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_0_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_0_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_0_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_0_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_0_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_0_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_0_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_0_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_0_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_0_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_0_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_0_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_0_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_0_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_0_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_114) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_0_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_0) // @[util.scala:504:26]
uops_0_br_mask <= _uops_0_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_117) begin // @[util.scala:520:18, :526:19, :528:35]
uops_1_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_1_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_1_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_1_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_1_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_1_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_1_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_1_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_1_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_1_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_1_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_1_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_1_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_1_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_1_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_1_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_1_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_1_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_1_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_1_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_1_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_1_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_1_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_1_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_1_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_1_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_1_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_1_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_1_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_1_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_1_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_1_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_1_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_1_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_1_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_1_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_1_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_1_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_1_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_1_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_1_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_1_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_1_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_1_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_1_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_1_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_1_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_1_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_1_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_1_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_1_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_1_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_1_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_1_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_1_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_1_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_1_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_1_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_1_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_1_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_1_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_1_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_1_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_1_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_1_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_1_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_1_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_1_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_1_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_1_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_1_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_1_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_1_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_1_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_1_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_1_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_1_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_1_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_1_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_1_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_1_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_1_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_1_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_116) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_1_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_1) // @[util.scala:504:26]
uops_1_br_mask <= _uops_1_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_119) begin // @[util.scala:520:18, :526:19, :528:35]
uops_2_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_2_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_2_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_2_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_2_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_2_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_2_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_2_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_2_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_2_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_2_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_2_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_2_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_2_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_2_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_2_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_2_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_2_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_2_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_2_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_2_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_2_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_2_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_2_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_2_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_2_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_2_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_2_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_2_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_2_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_2_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_2_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_2_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_2_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_2_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_2_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_2_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_2_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_2_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_2_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_2_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_2_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_2_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_2_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_2_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_2_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_2_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_2_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_2_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_2_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_2_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_2_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_2_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_2_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_2_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_2_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_2_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_2_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_2_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_2_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_2_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_2_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_2_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_2_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_2_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_2_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_2_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_2_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_2_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_2_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_2_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_2_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_2_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_2_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_2_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_2_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_2_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_2_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_2_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_2_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_2_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_2_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_2_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_118) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_2_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_2) // @[util.scala:504:26]
uops_2_br_mask <= _uops_2_br_mask_T_1; // @[util.scala:97:21, :505:22]
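    // Queue entry 3: when this slot is the enqueue target, latch every field of the incoming uop.
    // The br_mask register below follows the same enqueue/valid update policy as the other entries.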
if (_GEN_121) begin // @[util.scala:520:18, :526:19, :528:35]
uops_3_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_3_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_3_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_3_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_3_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_3_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_3_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_3_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_3_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_3_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_3_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_3_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_3_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_3_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_3_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_3_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_3_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_3_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_3_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_3_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_3_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_3_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_3_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_3_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_3_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_3_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_3_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_3_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_3_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_3_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_3_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_3_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_3_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_3_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_3_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_3_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_3_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_3_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_3_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_3_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_3_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_3_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_3_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_3_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_3_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_3_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_3_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_3_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_3_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_3_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_3_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_3_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_3_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_3_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_3_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_3_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_3_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_3_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_3_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_3_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_3_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_3_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_3_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_3_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_3_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_3_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_3_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_3_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_3_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_3_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_3_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_3_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_3_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_3_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_3_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_3_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_3_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_3_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_3_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_3_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_3_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_3_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_3_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_120) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_3_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_3) // @[util.scala:504:26]
uops_3_br_mask <= _uops_3_br_mask_T_1; // @[util.scala:97:21, :505:22]
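    // Queue entry 4: latch the enqueued uop's fields when this slot is written; br_mask is updated separately below.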
if (_GEN_123) begin // @[util.scala:520:18, :526:19, :528:35]
uops_4_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_4_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_4_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_4_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_4_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_4_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_4_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_4_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_4_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_4_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_4_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_4_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_4_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_4_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_4_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_4_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_4_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_4_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_4_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_4_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_4_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_4_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_4_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_4_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_4_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_4_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_4_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_4_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_4_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_4_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_4_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_4_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_4_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_4_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_4_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_4_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_4_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_4_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_4_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_4_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_4_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_4_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_4_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_4_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_4_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_4_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_4_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_4_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_4_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_4_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_4_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_4_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_4_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_4_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_4_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_4_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_4_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_4_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_4_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_4_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_4_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_4_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_4_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_4_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_4_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_4_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_4_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_4_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_4_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_4_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_4_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_4_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_4_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_4_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_4_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_4_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_4_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_4_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_4_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_4_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_4_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_4_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_4_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_122) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_4_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_4) // @[util.scala:504:26]
uops_4_br_mask <= _uops_4_br_mask_T_1; // @[util.scala:97:21, :505:22]
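    // Queue entry 5: capture the incoming uop into this slot on enqueue.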
if (_GEN_125) begin // @[util.scala:520:18, :526:19, :528:35]
uops_5_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_5_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_5_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_5_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_5_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_5_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_5_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_5_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_5_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_5_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_5_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_5_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_5_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_5_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_5_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_5_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_5_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_5_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_5_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_5_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_5_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_5_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_5_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_5_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_5_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_5_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_5_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_5_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_5_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_5_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_5_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_5_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_5_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_5_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_5_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_5_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_5_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_5_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_5_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_5_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_5_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_5_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_5_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_5_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_5_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_5_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_5_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_5_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_5_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_5_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_5_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_5_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_5_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_5_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_5_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_5_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_5_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_5_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_5_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_5_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_5_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_5_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_5_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_5_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_5_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_5_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_5_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_5_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_5_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_5_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_5_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_5_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_5_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_5_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_5_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_5_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_5_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_5_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_5_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_5_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_5_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_5_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_5_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_124) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_5_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_5) // @[util.scala:504:26]
uops_5_br_mask <= _uops_5_br_mask_T_1; // @[util.scala:97:21, :505:22]
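    // Queue entry 6: capture the incoming uop into this slot on enqueue.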
if (_GEN_127) begin // @[util.scala:520:18, :526:19, :528:35]
uops_6_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_6_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_6_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_6_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_6_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_6_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_6_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_6_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_6_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_6_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_6_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_6_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_6_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_6_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_6_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_6_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_6_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_6_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_6_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_6_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_6_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_6_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_6_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_6_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_6_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_6_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_6_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_6_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_6_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_6_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_6_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_6_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_6_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_6_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_6_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_6_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_6_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_6_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_6_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_6_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_6_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_6_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_6_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_6_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_6_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_6_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_6_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_6_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_6_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_6_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_6_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_6_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_6_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_6_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_6_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_6_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_6_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_6_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_6_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_6_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_6_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_6_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_6_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_6_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_6_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_6_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_6_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_6_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_6_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_6_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_6_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_6_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_6_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_6_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_6_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_6_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_6_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_6_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_6_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_6_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_6_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_6_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_6_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_126) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_6_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_6) // @[util.scala:504:26]
uops_6_br_mask <= _uops_6_br_mask_T_1; // @[util.scala:97:21, :505:22]
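    // Queue entry 7: latch the enqueued uop's fields when this slot is selected for the write.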
if (_GEN_129) begin // @[util.scala:520:18, :526:19, :528:35]
uops_7_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_7_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_7_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_7_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_7_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_7_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_7_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_7_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_7_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_7_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_7_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_7_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_7_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_7_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_7_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_7_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_7_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_7_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_7_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_7_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_7_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_7_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_7_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_7_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_7_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_7_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_7_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_7_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_7_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_7_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_7_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_7_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_7_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_7_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_7_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_7_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_7_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_7_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_7_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_7_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_7_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_7_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_7_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_7_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_7_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_7_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_7_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_7_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_7_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_7_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_7_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_7_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_7_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_7_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_7_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_7_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_7_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_7_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_7_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_7_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_7_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_7_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_7_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_7_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_7_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_7_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_7_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_7_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_7_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_7_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_7_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_7_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_7_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_7_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_7_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_7_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_7_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_7_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_7_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_7_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_7_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_7_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_7_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_128) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_7_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_7) // @[util.scala:504:26]
uops_7_br_mask <= _uops_7_br_mask_T_1; // @[util.scala:97:21, :505:22]
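    // Queue entry 8: latch the enqueued uop's fields when this slot is selected for the write.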
if (_GEN_131) begin // @[util.scala:520:18, :526:19, :528:35]
uops_8_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_8_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_8_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_8_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_8_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_8_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_8_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_8_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_8_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_8_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_8_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_8_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_8_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_8_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_8_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_8_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_8_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_8_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_8_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_8_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_8_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_8_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_8_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_8_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_8_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_8_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_8_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_8_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_8_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_8_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_8_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_8_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_8_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_8_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_8_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_8_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_8_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_8_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_8_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_8_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_8_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_8_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_8_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_8_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_8_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_8_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_8_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_8_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_8_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_8_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_8_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_8_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_8_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_8_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_8_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_8_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_8_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_8_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_8_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_8_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_8_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_8_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_8_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_8_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_8_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_8_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_8_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_8_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_8_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_8_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_8_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_8_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_8_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_8_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_8_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_8_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_8_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_8_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_8_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_8_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_8_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_8_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_8_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_8_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_8_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_8_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_8_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_8_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_8_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_8_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_8_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_8_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_8_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_130) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_8_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_8) // @[util.scala:504:26]
uops_8_br_mask <= _uops_8_br_mask_T_1; // @[util.scala:97:21, :505:22]
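    // Queue entry 9: capture the incoming uop into this slot on enqueue (block continues past this excerpt).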
if (_GEN_133) begin // @[util.scala:520:18, :526:19, :528:35]
uops_9_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_9_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_9_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_9_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_9_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_9_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_9_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_9_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_9_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_9_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_9_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_9_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_9_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_9_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_9_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_9_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_9_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_9_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_9_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_9_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_9_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_9_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_9_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_9_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_9_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_9_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_9_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_9_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_9_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_9_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_9_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_9_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_9_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_9_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_9_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_9_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_9_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_9_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_9_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_9_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_9_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_9_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_9_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_9_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_9_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_9_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_9_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_9_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_9_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_9_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_9_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_9_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_9_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_9_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_9_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_9_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_9_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_9_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_9_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_9_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_9_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_9_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_9_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_9_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_9_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_9_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_9_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_9_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_9_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_9_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_9_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_9_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_9_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_9_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_9_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_9_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_9_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_9_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_9_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_9_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_9_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_9_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_9_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_9_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_9_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_9_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_9_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_9_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_9_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_9_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_9_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_9_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_9_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_132) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_9_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_9) // @[util.scala:504:26]
uops_9_br_mask <= _uops_9_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_135) begin // @[util.scala:520:18, :526:19, :528:35]
uops_10_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_10_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_10_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_10_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_10_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_10_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_10_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_10_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_10_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_10_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_10_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_10_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_10_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_10_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_10_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_10_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_10_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_10_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_10_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_10_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_10_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_10_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_10_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_10_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_10_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_10_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_10_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_10_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_10_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_10_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_10_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_10_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_10_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_10_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_10_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_10_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_10_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_10_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_10_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_10_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_10_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_10_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_10_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_10_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_10_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_10_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_10_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_10_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_10_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_10_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_10_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_10_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_10_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_10_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_10_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_10_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_10_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_10_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_10_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_10_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_10_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_10_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_10_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_10_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_10_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_10_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_10_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_10_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_10_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_10_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_10_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_10_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_10_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_10_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_10_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_10_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_10_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_10_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_10_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_10_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_10_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_10_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_10_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_10_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_10_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_10_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_10_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_10_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_10_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_10_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_10_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_10_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_10_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_134) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_10_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_10) // @[util.scala:504:26]
uops_10_br_mask <= _uops_10_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_137) begin // @[util.scala:520:18, :526:19, :528:35]
uops_11_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_11_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_11_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_11_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_11_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_11_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_11_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_11_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_11_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_11_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_11_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_11_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_11_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_11_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_11_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_11_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_11_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_11_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_11_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_11_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_11_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_11_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_11_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_11_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_11_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_11_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_11_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_11_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_11_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_11_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_11_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_11_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_11_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_11_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_11_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_11_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_11_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_11_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_11_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_11_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_11_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_11_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_11_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_11_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_11_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_11_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_11_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_11_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_11_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_11_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_11_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_11_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_11_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_11_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_11_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_11_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_11_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_11_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_11_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_11_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_11_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_11_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_11_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_11_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_11_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_11_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_11_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_11_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_11_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_11_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_11_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_11_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_11_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_11_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_11_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_11_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_11_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_11_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_11_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_11_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_11_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_11_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_11_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_11_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_11_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_11_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_11_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_11_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_11_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_11_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_11_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_11_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_11_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_136) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_11_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_11) // @[util.scala:504:26]
uops_11_br_mask <= _uops_11_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_139) begin // @[util.scala:520:18, :526:19, :528:35]
uops_12_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_12_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_12_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_12_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_12_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_12_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_12_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_12_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_12_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_12_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_12_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_12_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_12_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_12_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_12_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_12_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_12_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_12_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_12_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_12_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_12_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_12_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_12_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_12_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_12_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_12_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_12_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_12_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_12_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_12_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_12_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_12_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_12_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_12_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_12_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_12_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_12_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_12_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_12_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_12_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_12_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_12_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_12_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_12_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_12_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_12_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_12_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_12_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_12_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_12_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_12_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_12_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_12_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_12_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_12_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_12_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_12_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_12_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_12_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_12_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_12_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_12_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_12_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_12_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_12_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_12_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_12_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_12_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_12_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_12_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_12_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_12_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_12_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_12_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_12_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_12_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_12_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_12_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_12_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_12_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_12_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_12_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_12_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_12_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_12_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_12_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_12_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_12_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_12_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_12_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_12_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_12_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_12_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_138) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_12_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_12) // @[util.scala:504:26]
uops_12_br_mask <= _uops_12_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_141) begin // @[util.scala:520:18, :526:19, :528:35]
uops_13_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_13_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_13_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_13_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_13_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_13_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_13_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_13_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_13_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_13_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_13_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_13_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_13_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_13_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_13_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_13_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_13_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_13_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_13_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_13_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_13_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_13_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_13_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_13_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_13_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_13_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_13_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_13_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_13_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_13_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_13_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_13_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_13_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_13_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_13_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_13_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_13_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_13_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_13_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_13_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_13_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_13_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_13_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_13_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_13_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_13_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_13_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_13_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_13_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_13_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_13_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_13_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_13_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_13_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_13_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_13_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_13_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_13_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_13_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_13_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_13_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_13_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_13_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_13_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_13_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_13_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_13_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_13_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_13_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_13_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_13_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_13_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_13_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_13_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_13_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_13_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_13_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_13_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_13_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_13_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_13_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_13_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_13_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_13_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_13_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_13_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_13_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_13_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_13_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_13_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_13_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_13_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_13_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_140) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_13_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_13) // @[util.scala:504:26]
uops_13_br_mask <= _uops_13_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_142) begin // @[util.scala:520:18, :526:19, :528:35]
uops_14_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_14_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_14_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_14_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_14_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_14_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_14_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_14_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_14_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_14_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_14_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_14_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_14_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_14_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_14_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_14_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_14_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_14_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_14_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_14_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_14_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_14_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_14_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_14_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_14_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_14_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_14_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_14_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_14_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_14_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_14_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_14_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_14_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_14_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_14_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_14_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_14_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_14_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_14_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_14_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_14_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_14_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_14_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_14_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_14_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_14_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_14_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_14_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_14_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_14_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_14_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_14_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_14_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_14_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_14_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_14_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_14_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_14_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_14_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_14_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_14_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_14_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_14_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_14_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_14_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_14_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_14_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_14_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_14_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_14_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_14_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_14_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_14_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_14_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_14_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_14_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_14_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_14_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_14_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_14_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_14_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_14_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_14_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_14_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_14_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_14_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_14_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_14_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_14_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_14_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_14_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_14_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_14_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
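    // Note (added comment): unlike the payload fields above, br_mask for a live entry is
    // refreshed on every cycle (presumably clearing bits of branches that have resolved),
    // not only when the entry is enqueued.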
if (do_enq & wrap) // @[Counter.scala:73:24]
uops_14_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_14) // @[util.scala:504:26]
uops_14_br_mask <= _uops_14_br_mask_T_1; // @[util.scala:97:21, :505:22]
  end // always @(posedge)
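  // Queue payload memory: a 15-entry x 131-bit RAM holding the non-uop fields
  // ({sdq_id, way_en, old_meta_tag, old_meta_coh_state, tag_match, is_hella, data, addr}),
  // read at deq_ptr_value and written at enq_ptr_value whenever do_enq is asserted.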
ram_15x131 ram_ext ( // @[util.scala:503:22]
.R0_addr (deq_ptr_value), // @[Counter.scala:61:40]
.R0_en (1'h1),
.R0_clk (clock),
.R0_data (_ram_ext_R0_data),
.W0_addr (enq_ptr_value), // @[Counter.scala:61:40]
.W0_en (do_enq), // @[util.scala:514:26]
.W0_clk (clock),
.W0_data ({io_enq_bits_sdq_id_0, io_enq_bits_way_en_0, io_enq_bits_old_meta_tag_0, io_enq_bits_old_meta_coh_state_0, io_enq_bits_tag_match_0, io_enq_bits_is_hella_0, io_enq_bits_data_0, io_enq_bits_addr_0}) // @[util.scala:458:7, :503:22]
); // @[util.scala:503:22]
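  // Drive the module's output ports from the internal "_0"-suffixed nets.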
assign io_enq_ready = io_enq_ready_0; // @[util.scala:458:7]
assign io_deq_valid = io_deq_valid_0; // @[util.scala:458:7]
assign io_deq_bits_uop_inst = io_deq_bits_uop_inst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_debug_inst = io_deq_bits_uop_debug_inst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_rvc = io_deq_bits_uop_is_rvc_0; // @[util.scala:458:7]
assign io_deq_bits_uop_debug_pc = io_deq_bits_uop_debug_pc_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iq_type_0 = io_deq_bits_uop_iq_type_0_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iq_type_1 = io_deq_bits_uop_iq_type_1_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iq_type_2 = io_deq_bits_uop_iq_type_2_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iq_type_3 = io_deq_bits_uop_iq_type_3_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_0 = io_deq_bits_uop_fu_code_0_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_1 = io_deq_bits_uop_fu_code_1_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_2 = io_deq_bits_uop_fu_code_2_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_3 = io_deq_bits_uop_fu_code_3_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_4 = io_deq_bits_uop_fu_code_4_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_5 = io_deq_bits_uop_fu_code_5_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_6 = io_deq_bits_uop_fu_code_6_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_7 = io_deq_bits_uop_fu_code_7_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_8 = io_deq_bits_uop_fu_code_8_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_9 = io_deq_bits_uop_fu_code_9_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_issued = io_deq_bits_uop_iw_issued_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_issued_partial_agen = io_deq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_issued_partial_dgen = io_deq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_p1_speculative_child = io_deq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_p2_speculative_child = io_deq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_p1_bypass_hint = io_deq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_p2_bypass_hint = io_deq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_p3_bypass_hint = io_deq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7]
assign io_deq_bits_uop_dis_col_sel = io_deq_bits_uop_dis_col_sel_0; // @[util.scala:458:7]
assign io_deq_bits_uop_br_mask = io_deq_bits_uop_br_mask_0; // @[util.scala:458:7]
assign io_deq_bits_uop_br_tag = io_deq_bits_uop_br_tag_0; // @[util.scala:458:7]
assign io_deq_bits_uop_br_type = io_deq_bits_uop_br_type_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_sfb = io_deq_bits_uop_is_sfb_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_fence = io_deq_bits_uop_is_fence_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_fencei = io_deq_bits_uop_is_fencei_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_sfence = io_deq_bits_uop_is_sfence_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_amo = io_deq_bits_uop_is_amo_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_eret = io_deq_bits_uop_is_eret_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_sys_pc2epc = io_deq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_rocc = io_deq_bits_uop_is_rocc_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_mov = io_deq_bits_uop_is_mov_0; // @[util.scala:458:7]
assign io_deq_bits_uop_ftq_idx = io_deq_bits_uop_ftq_idx_0; // @[util.scala:458:7]
assign io_deq_bits_uop_edge_inst = io_deq_bits_uop_edge_inst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_pc_lob = io_deq_bits_uop_pc_lob_0; // @[util.scala:458:7]
assign io_deq_bits_uop_taken = io_deq_bits_uop_taken_0; // @[util.scala:458:7]
assign io_deq_bits_uop_imm_rename = io_deq_bits_uop_imm_rename_0; // @[util.scala:458:7]
assign io_deq_bits_uop_imm_sel = io_deq_bits_uop_imm_sel_0; // @[util.scala:458:7]
assign io_deq_bits_uop_pimm = io_deq_bits_uop_pimm_0; // @[util.scala:458:7]
assign io_deq_bits_uop_imm_packed = io_deq_bits_uop_imm_packed_0; // @[util.scala:458:7]
assign io_deq_bits_uop_op1_sel = io_deq_bits_uop_op1_sel_0; // @[util.scala:458:7]
assign io_deq_bits_uop_op2_sel = io_deq_bits_uop_op2_sel_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_ldst = io_deq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_wen = io_deq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_ren1 = io_deq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_ren2 = io_deq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_ren3 = io_deq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_swap12 = io_deq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_swap23 = io_deq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_typeTagIn = io_deq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_typeTagOut = io_deq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_fromint = io_deq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_toint = io_deq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_fastpipe = io_deq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_fma = io_deq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_div = io_deq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_sqrt = io_deq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_wflags = io_deq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_vec = io_deq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7]
assign io_deq_bits_uop_rob_idx = io_deq_bits_uop_rob_idx_0; // @[util.scala:458:7]
assign io_deq_bits_uop_ldq_idx = io_deq_bits_uop_ldq_idx_0; // @[util.scala:458:7]
assign io_deq_bits_uop_stq_idx = io_deq_bits_uop_stq_idx_0; // @[util.scala:458:7]
assign io_deq_bits_uop_rxq_idx = io_deq_bits_uop_rxq_idx_0; // @[util.scala:458:7]
assign io_deq_bits_uop_pdst = io_deq_bits_uop_pdst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_prs1 = io_deq_bits_uop_prs1_0; // @[util.scala:458:7]
assign io_deq_bits_uop_prs2 = io_deq_bits_uop_prs2_0; // @[util.scala:458:7]
assign io_deq_bits_uop_prs3 = io_deq_bits_uop_prs3_0; // @[util.scala:458:7]
assign io_deq_bits_uop_ppred = io_deq_bits_uop_ppred_0; // @[util.scala:458:7]
assign io_deq_bits_uop_prs1_busy = io_deq_bits_uop_prs1_busy_0; // @[util.scala:458:7]
assign io_deq_bits_uop_prs2_busy = io_deq_bits_uop_prs2_busy_0; // @[util.scala:458:7]
assign io_deq_bits_uop_prs3_busy = io_deq_bits_uop_prs3_busy_0; // @[util.scala:458:7]
assign io_deq_bits_uop_ppred_busy = io_deq_bits_uop_ppred_busy_0; // @[util.scala:458:7]
assign io_deq_bits_uop_stale_pdst = io_deq_bits_uop_stale_pdst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_exception = io_deq_bits_uop_exception_0; // @[util.scala:458:7]
assign io_deq_bits_uop_exc_cause = io_deq_bits_uop_exc_cause_0; // @[util.scala:458:7]
assign io_deq_bits_uop_mem_cmd = io_deq_bits_uop_mem_cmd_0; // @[util.scala:458:7]
assign io_deq_bits_uop_mem_size = io_deq_bits_uop_mem_size_0; // @[util.scala:458:7]
assign io_deq_bits_uop_mem_signed = io_deq_bits_uop_mem_signed_0; // @[util.scala:458:7]
assign io_deq_bits_uop_uses_ldq = io_deq_bits_uop_uses_ldq_0; // @[util.scala:458:7]
assign io_deq_bits_uop_uses_stq = io_deq_bits_uop_uses_stq_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_unique = io_deq_bits_uop_is_unique_0; // @[util.scala:458:7]
assign io_deq_bits_uop_flush_on_commit = io_deq_bits_uop_flush_on_commit_0; // @[util.scala:458:7]
assign io_deq_bits_uop_csr_cmd = io_deq_bits_uop_csr_cmd_0; // @[util.scala:458:7]
assign io_deq_bits_uop_ldst_is_rs1 = io_deq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7]
assign io_deq_bits_uop_ldst = io_deq_bits_uop_ldst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_lrs1 = io_deq_bits_uop_lrs1_0; // @[util.scala:458:7]
assign io_deq_bits_uop_lrs2 = io_deq_bits_uop_lrs2_0; // @[util.scala:458:7]
assign io_deq_bits_uop_lrs3 = io_deq_bits_uop_lrs3_0; // @[util.scala:458:7]
assign io_deq_bits_uop_dst_rtype = io_deq_bits_uop_dst_rtype_0; // @[util.scala:458:7]
assign io_deq_bits_uop_lrs1_rtype = io_deq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7]
assign io_deq_bits_uop_lrs2_rtype = io_deq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7]
assign io_deq_bits_uop_frs3_en = io_deq_bits_uop_frs3_en_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fcn_dw = io_deq_bits_uop_fcn_dw_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fcn_op = io_deq_bits_uop_fcn_op_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_val = io_deq_bits_uop_fp_val_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_rm = io_deq_bits_uop_fp_rm_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_typ = io_deq_bits_uop_fp_typ_0; // @[util.scala:458:7]
assign io_deq_bits_uop_xcpt_pf_if = io_deq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7]
assign io_deq_bits_uop_xcpt_ae_if = io_deq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7]
assign io_deq_bits_uop_xcpt_ma_if = io_deq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7]
assign io_deq_bits_uop_bp_debug_if = io_deq_bits_uop_bp_debug_if_0; // @[util.scala:458:7]
assign io_deq_bits_uop_bp_xcpt_if = io_deq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7]
assign io_deq_bits_uop_debug_fsrc = io_deq_bits_uop_debug_fsrc_0; // @[util.scala:458:7]
assign io_deq_bits_uop_debug_tsrc = io_deq_bits_uop_debug_tsrc_0; // @[util.scala:458:7]
assign io_deq_bits_addr = io_deq_bits_addr_0; // @[util.scala:458:7]
assign io_deq_bits_data = io_deq_bits_data_0; // @[util.scala:458:7]
assign io_deq_bits_is_hella = io_deq_bits_is_hella_0; // @[util.scala:458:7]
assign io_deq_bits_tag_match = io_deq_bits_tag_match_0; // @[util.scala:458:7]
assign io_deq_bits_old_meta_coh_state = io_deq_bits_old_meta_coh_state_0; // @[util.scala:458:7]
assign io_deq_bits_old_meta_tag = io_deq_bits_old_meta_tag_0; // @[util.scala:458:7]
assign io_deq_bits_way_en = io_deq_bits_way_en_0; // @[util.scala:458:7]
assign io_deq_bits_sdq_id = io_deq_bits_sdq_id_0; // @[util.scala:458:7]
assign io_empty = io_empty_0; // @[util.scala:458:7]
assign io_count = io_count_0; // @[util.scala:458:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File util.scala:
//******************************************************************************
// Copyright (c) 2015 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Utility Functions
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v4.util
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util.{Str}
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.tile.{TileKey}
import boom.v4.common.{MicroOp}
import boom.v4.exu.{BrUpdateInfo}
/**
 * Object to XOR fold an input register of fullLength into a compressedLength.
*/
object Fold
{
def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = {
val clen = compressedLength
val hlen = fullLength
if (hlen <= clen) {
input
} else {
var res = 0.U(clen.W)
var remaining = input.asUInt
for (i <- 0 to hlen-1 by clen) {
val len = if (i + clen > hlen ) (hlen - i) else clen
require(len > 0)
res = res(clen-1,0) ^ remaining(len-1,0)
remaining = remaining >> len.U
}
res
}
}
}
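// Example (illustrative): folding a 16-bit value into 8 bits XORs the two halves,
// e.g. for a hypothetical history register h, Fold(h, 8, 16) equals h(15,8) ^ h(7,0).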
/**
* Object to check if MicroOp was killed due to a branch mispredict.
* Uses "Fast" branch masks
*/
object IsKilledByBranch
{
def apply(brupdate: BrUpdateInfo, flush: Bool, uop: MicroOp): Bool = {
return apply(brupdate, flush, uop.br_mask)
}
def apply(brupdate: BrUpdateInfo, flush: Bool, uop_mask: UInt): Bool = {
return maskMatch(brupdate.b1.mispredict_mask, uop_mask) || flush
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: T): Bool = {
return apply(brupdate, flush, bundle.uop)
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: Valid[T]): Bool = {
return apply(brupdate, flush, bundle.bits)
}
}
/**
* Object to return new MicroOp with a new BR mask given a MicroOp mask
* and old BR mask.
*/
object GetNewUopAndBrMask
{
def apply(uop: MicroOp, brupdate: BrUpdateInfo)
(implicit p: Parameters): MicroOp = {
val newuop = WireInit(uop)
newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask
newuop
}
}
/**
* Object to return a BR mask given a MicroOp mask and old BR mask.
*/
object GetNewBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = {
return uop.br_mask & ~brupdate.b1.resolve_mask
}
def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = {
return br_mask & ~brupdate.b1.resolve_mask
}
}
object UpdateBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = {
val out = WireInit(uop)
out.br_mask := GetNewBrMask(brupdate, uop)
out
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = {
val out = WireInit(bundle)
out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask)
out
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: Valid[T]): Valid[T] = {
val out = WireInit(bundle)
out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask)
out.valid := bundle.valid && !IsKilledByBranch(brupdate, flush, bundle.bits.uop.br_mask)
out
}
}
/**
* Object to check if at least 1 bit matches in two masks
*/
object maskMatch
{
def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U
}
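// Example (illustrative): maskMatch("b0101".U, "b0011".U) is true (bit 0 overlaps),
// while maskMatch("b0100".U, "b0011".U) is false.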
/**
* Object to clear one bit in a mask given an index
*/
object clearMaskBit
{
def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0)
}
/**
* Object to shift a register over by one bit and concat a new one
*/
object PerformShiftRegister
{
def apply(reg_val: UInt, new_bit: Bool): UInt = {
reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt
reg_val
}
}
/**
* Object to shift a register over by one bit, wrapping the top bit around to the bottom
* (XOR'ed with a new-bit), and evicting a bit at index HLEN.
* This is used to simulate a longer HLEN-width shift register that is folded
* down to a compressed CLEN.
*/
object PerformCircularShiftRegister
{
def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = {
val carry = csr(clen-1)
val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U)
newval
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapAdd
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, amt: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + amt)(log2Ceil(n)-1,0)
} else {
val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt)
Mux(sum >= n.U,
sum - n.U,
sum)
}
}
}
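// Examples (illustrative): WrapAdd(6.U, 3.U, 8) yields 1.U (power-of-two n just truncates),
// and WrapAdd(4.U, 3.U, 6) yields 1.U (non-power-of-two n subtracts n after overflow).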
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapSub
{
// "n" is the number of increments, so we wrap to n-1.
def apply(value: UInt, amt: Int, n: Int): UInt = {
if (isPow2(n)) {
(value - amt.U)(log2Ceil(n)-1,0)
} else {
val v = Cat(0.U(1.W), value)
val b = Cat(0.U(1.W), amt.U)
Mux(value >= amt.U,
value - amt.U,
n.U - amt.U + value)
}
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapInc
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === (n-1).U)
Mux(wrap, 0.U, value + 1.U)
}
}
}
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapDec
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value - 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === 0.U)
Mux(wrap, (n-1).U, value - 1.U)
}
}
}
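// Examples (illustrative): with n = 6, WrapInc(5.U, 6) yields 0.U and WrapDec(0.U, 6) yields 5.U.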
/**
* Object to mask off lower bits of a PC to align to a "b"
* Byte boundary.
*/
object AlignPCToBoundary
{
def apply(pc: UInt, b: Int): UInt = {
    // Invert for the scenario where pc is wider than b
    // (masking directly with ~(b-1).U would clear all bits above size(b)).
~(~pc | (b-1).U)
}
}
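// Example (illustrative): AlignPCToBoundary(pc, 64) clears pc(5,0) while preserving the upper
// bits, e.g. a hypothetical pc of 0x80001234 aligns to 0x80001200.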
/**
* Object to rotate a signal left by one
*/
object RotateL1
{
def apply(signal: UInt): UInt = {
val w = signal.getWidth
val out = Cat(signal(w-2,0), signal(w-1))
return out
}
}
/**
* Object to sext a value to a particular length.
*/
object Sext
{
def apply(x: UInt, length: Int): UInt = {
if (x.getWidth == length) return x
else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x)
}
}
/**
* Object to translate from BOOM's special "packed immediate" to a 32b signed immediate
* Asking for U-type gives it shifted up 12 bits.
*/
object ImmGen
{
import boom.v4.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U, IS_N}
def apply(i: UInt, isel: UInt): UInt = {
val ip = Mux(isel === IS_N, 0.U(LONGEST_IMM_SZ.W), i)
val sign = ip(LONGEST_IMM_SZ-1).asSInt
val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign)
val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign)
val i11 = Mux(isel === IS_U, 0.S,
Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign))
val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt)
val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt)
val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S)
return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0)
}
}
/**
* Object to see if an instruction is a JALR.
*/
object DebugIsJALR
{
def apply(inst: UInt): Bool = {
// TODO Chisel not sure why this won't compile
// val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)),
// Array(
// JALR -> Bool(true)))
inst(6,0) === "b1100111".U
}
}
/**
* Object to take an instruction and output its branch or jal target. Only used
 * for a debug assert (nowhere else would we jump straight from instruction
* bits to a target).
*/
object DebugGetBJImm
{
def apply(inst: UInt): UInt = {
// TODO Chisel not sure why this won't compile
//val csignals =
//rocket.DecodeLogic(inst,
// List(Bool(false), Bool(false)),
// Array(
// BEQ -> List(Bool(true ), Bool(false)),
// BNE -> List(Bool(true ), Bool(false)),
// BGE -> List(Bool(true ), Bool(false)),
// BGEU -> List(Bool(true ), Bool(false)),
// BLT -> List(Bool(true ), Bool(false)),
// BLTU -> List(Bool(true ), Bool(false))
// ))
//val is_br :: nothing :: Nil = csignals
val is_br = (inst(6,0) === "b1100011".U)
val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W))
val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W))
Mux(is_br, br_targ, jal_targ)
}
}
/**
* Object to return the lowest bit position after the head.
*/
object AgePriorityEncoder
{
def apply(in: Seq[Bool], head: UInt): UInt = {
val n = in.size
val width = log2Ceil(in.size)
val n_padded = 1 << width
val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in
val idx = PriorityEncoder(temp_vec)
idx(width-1, 0) //discard msb
}
}
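// Example (illustrative): with in = Seq(T, F, T, F) and head = 2.U the encoder returns 2.U
// (the first valid entry at or after head); with in = Seq(T, F, F, F) and head = 2.U it wraps
// around and returns 0.U.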
/**
* Object to determine whether queue
* index i0 is older than index i1.
*/
object IsOlder
{
def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head))
}
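// Example (illustrative): with head = 3.U, IsOlder(5.U, 1.U, 3.U) is true, since index 5 comes
// before index 1 in circular order starting from the head; IsOlder(1.U, 5.U, 3.U) is false.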
object IsYoungerMask
{
def apply(i: UInt, head: UInt, n: Integer): UInt = {
val hi_mask = ~MaskLower(UIntToOH(i)(n-1,0))
val lo_mask = ~MaskUpper(UIntToOH(head)(n-1,0))
Mux(i < head, hi_mask & lo_mask, hi_mask | lo_mask)(n-1,0)
}
}
/**
* Set all bits at or below the highest order '1'.
*/
object MaskLower
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => in >> i.U).reduce(_|_)
}
}
/**
* Set all bits at or above the lowest order '1'.
*/
object MaskUpper
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_)
}
}
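// Examples (illustrative): MaskLower("b00100".U) yields "b00111".U and
// MaskUpper("b00100".U) yields "b11100".U.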
/**
* Transpose a matrix of Chisel Vecs.
*/
object Transpose
{
def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = {
val n = in(0).size
VecInit((0 until n).map(i => VecInit(in.map(row => row(i)))))
}
}
/**
* N-wide one-hot priority encoder.
*/
object SelectFirstN
{
def apply(in: UInt, n: Int) = {
val sels = Wire(Vec(n, UInt(in.getWidth.W)))
var mask = in
for (i <- 0 until n) {
sels(i) := PriorityEncoderOH(mask)
mask = mask & ~sels(i)
}
sels
}
}
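// Example (illustrative): SelectFirstN("b01011".U, 2) yields sels(0) === "b00001".U and
// sels(1) === "b00010".U (the two lowest set bits, each one-hot).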
/**
* Connect the first k of n valid input interfaces to k output interfaces.
*/
class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module
{
require(n >= k)
val io = IO(new Bundle {
val in = Vec(n, Flipped(DecoupledIO(gen)))
val out = Vec(k, DecoupledIO(gen))
})
if (n == k) {
io.out <> io.in
} else {
val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c))
val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col =>
(col zip io.in.map(_.valid)) map {case (c,v) => c && v})
val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_))
val out_valids = sels map (col => col.reduce(_||_))
val out_data = sels map (s => Mux1H(s, io.in.map(_.bits)))
in_readys zip io.in foreach {case (r,i) => i.ready := r}
out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d}
}
}
/**
* Create a queue that can be killed with a branch kill signal.
 * Assumption: enq.valid is only high if not killed by a branch (so we don't check IsKilled on io.enq).
*/
class BranchKillableQueue[T <: boom.v4.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v4.common.MicroOp => Bool = u => true.B, fastDeq: Boolean = false)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v4.common.BoomModule()(p)
with boom.v4.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val enq = Flipped(Decoupled(gen))
val deq = Decoupled(gen)
val brupdate = Input(new BrUpdateInfo())
val flush = Input(Bool())
val empty = Output(Bool())
val count = Output(UInt(log2Ceil(entries).W))
})
if (fastDeq && entries > 1) {
// Pipeline dequeue selection so the mux gets an entire cycle
val main = Module(new BranchKillableQueue(gen, entries-1, flush_fn, false))
val out_reg = Reg(gen)
val out_valid = RegInit(false.B)
val out_uop = Reg(new MicroOp)
main.io.enq <> io.enq
main.io.brupdate := io.brupdate
main.io.flush := io.flush
io.empty := main.io.empty && !out_valid
io.count := main.io.count + out_valid
io.deq.valid := out_valid
io.deq.bits := out_reg
io.deq.bits.uop := out_uop
out_uop := UpdateBrMask(io.brupdate, out_uop)
out_valid := out_valid && !IsKilledByBranch(io.brupdate, false.B, out_uop) && !(io.flush && flush_fn(out_uop))
main.io.deq.ready := false.B
when (io.deq.fire || !out_valid) {
out_valid := main.io.deq.valid && !IsKilledByBranch(io.brupdate, false.B, main.io.deq.bits.uop) && !(io.flush && flush_fn(main.io.deq.bits.uop))
out_reg := main.io.deq.bits
out_uop := UpdateBrMask(io.brupdate, main.io.deq.bits.uop)
main.io.deq.ready := true.B
}
} else {
val ram = Mem(entries, gen)
val valids = RegInit(VecInit(Seq.fill(entries) {false.B}))
val uops = Reg(Vec(entries, new MicroOp))
val enq_ptr = Counter(entries)
val deq_ptr = Counter(entries)
val maybe_full = RegInit(false.B)
val ptr_match = enq_ptr.value === deq_ptr.value
io.empty := ptr_match && !maybe_full
val full = ptr_match && maybe_full
val do_enq = WireInit(io.enq.fire && !IsKilledByBranch(io.brupdate, false.B, io.enq.bits.uop) && !(io.flush && flush_fn(io.enq.bits.uop)))
val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty)
for (i <- 0 until entries) {
val mask = uops(i).br_mask
val uop = uops(i)
valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, false.B, mask) && !(io.flush && flush_fn(uop))
when (valids(i)) {
uops(i).br_mask := GetNewBrMask(io.brupdate, mask)
}
}
when (do_enq) {
ram(enq_ptr.value) := io.enq.bits
valids(enq_ptr.value) := true.B
uops(enq_ptr.value) := io.enq.bits.uop
uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
enq_ptr.inc()
}
when (do_deq) {
valids(deq_ptr.value) := false.B
deq_ptr.inc()
}
when (do_enq =/= do_deq) {
maybe_full := do_enq
}
io.enq.ready := !full
val out = Wire(gen)
out := ram(deq_ptr.value)
out.uop := uops(deq_ptr.value)
io.deq.valid := !io.empty && valids(deq_ptr.value)
io.deq.bits := out
val ptr_diff = enq_ptr.value - deq_ptr.value
if (isPow2(entries)) {
io.count := Cat(maybe_full && ptr_match, ptr_diff)
}
else {
io.count := Mux(ptr_match,
Mux(maybe_full,
entries.asUInt, 0.U),
Mux(deq_ptr.value > enq_ptr.value,
entries.asUInt + ptr_diff, ptr_diff))
}
}
}
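// Usage sketch (hypothetical names; any payload bundle mixing in HasBoomUOP works):
//   val q = Module(new BranchKillableQueue(new MyReq, entries = 8, flush_fn = u => u.uses_ldq))
//   q.io.enq <> producer          // enq.valid must already respect branch kills
//   q.io.brupdate := io.brupdate
//   q.io.flush    := io.flush
//   consumer <> q.io.deq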
// ------------------------------------------
// Printf helper functions
// ------------------------------------------
object BoolToChar
{
/**
* Take in a Chisel Bool and convert it into a Str
* based on the Chars given
*
* @param c_bool Chisel Bool
* @param trueChar Scala Char if bool is true
* @param falseChar Scala Char if bool is false
* @return UInt ASCII Char for "trueChar" or "falseChar"
*/
def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = {
Mux(c_bool, Str(trueChar), Str(falseChar))
}
}
object CfiTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param cfi_type specific cfi type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(cfi_type: UInt) = {
val strings = Seq("----", "BR ", "JAL ", "JALR")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(cfi_type)
}
}
object BpdTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param bpd_type specific bpd type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(bpd_type: UInt) = {
val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(bpd_type)
}
}
object RobTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param rob_type specific rob type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(rob_type: UInt) = {
val strings = Seq("RST", "NML", "RBK", " WT")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(rob_type)
}
}
object XRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param xreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(xreg: UInt) = {
val strings = Seq(" x0", " ra", " sp", " gp",
" tp", " t0", " t1", " t2",
" s0", " s1", " a0", " a1",
" a2", " a3", " a4", " a5",
" a6", " a7", " s2", " s3",
" s4", " s5", " s6", " s7",
" s8", " s9", "s10", "s11",
" t3", " t4", " t5", " t6")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(xreg)
}
}
object FPRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param fpreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(fpreg: UInt) = {
val strings = Seq(" ft0", " ft1", " ft2", " ft3",
" ft4", " ft5", " ft6", " ft7",
" fs0", " fs1", " fa0", " fa1",
" fa2", " fa3", " fa4", " fa5",
" fa6", " fa7", " fs2", " fs3",
" fs4", " fs5", " fs6", " fs7",
" fs8", " fs9", "fs10", "fs11",
" ft8", " ft9", "ft10", "ft11")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(fpreg)
}
}
object BoomCoreStringPrefix
{
/**
* Add prefix to BOOM strings (currently only adds the hartId)
*
* @param strs list of strings
* @return String combining the list with the prefix per line
*/
def apply(strs: String*)(implicit p: Parameters) = {
val prefix = "[C" + s"${p(TileKey).tileId}" + "] "
strs.map(str => prefix + str + "\n").mkString("")
}
}
class BranchKillablePipeline[T <: boom.v4.common.HasBoomUOP](gen: T, stages: Int)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v4.common.BoomModule()(p)
with boom.v4.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val req = Input(Valid(gen))
val flush = Input(Bool())
val brupdate = Input(new BrUpdateInfo)
val resp = Output(Vec(stages, Valid(gen)))
})
require(stages > 0)
val uops = Reg(Vec(stages, Valid(gen)))
uops(0).valid := io.req.valid && !IsKilledByBranch(io.brupdate, io.flush, io.req.bits)
uops(0).bits := UpdateBrMask(io.brupdate, io.req.bits)
for (i <- 1 until stages) {
uops(i).valid := uops(i-1).valid && !IsKilledByBranch(io.brupdate, io.flush, uops(i-1).bits)
uops(i).bits := UpdateBrMask(io.brupdate, uops(i-1).bits)
}
for (i <- 0 until stages) { when (reset.asBool) { uops(i).valid := false.B } }
io.resp := uops
}
File issue-slot.scala:
//******************************************************************************
// Copyright (c) 2015 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// RISCV Processor Issue Slot Logic
//--------------------------------------------------------------------------
//------------------------------------------------------------------------------
//
// Note: stores (and AMOs) are "broken down" into 2 uops, but stored within a single issue-slot.
// TODO XXX make a separate issueSlot for MemoryIssueSlots, and only they break apart stores.
// TODO Disable ldspec for FP queue.
package boom.v4.exu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import boom.v4.common._
import boom.v4.util._
class IssueSlotIO(val numWakeupPorts: Int)(implicit p: Parameters) extends BoomBundle
{
val valid = Output(Bool())
  val will_be_valid = Output(Bool()) // TODO code review, do we need this signal so explicitly?
val request = Output(Bool())
val grant = Input(Bool())
val iss_uop = Output(new MicroOp())
val in_uop = Input(Valid(new MicroOp())) // if valid, this WILL overwrite an entry!
val out_uop = Output(new MicroOp())
val brupdate = Input(new BrUpdateInfo())
val kill = Input(Bool()) // pipeline flush
val clear = Input(Bool()) // entry being moved elsewhere (not mutually exclusive with grant)
val squash_grant = Input(Bool())
val wakeup_ports = Flipped(Vec(numWakeupPorts, Valid(new Wakeup)))
val pred_wakeup_port = Flipped(Valid(UInt(log2Ceil(ftqSz).W)))
val child_rebusys = Input(UInt(aluWidth.W))
}
class IssueSlot(val numWakeupPorts: Int, val isMem: Boolean, val isFp: Boolean)(implicit p: Parameters)
extends BoomModule
{
val io = IO(new IssueSlotIO(numWakeupPorts))
val slot_valid = RegInit(false.B)
val slot_uop = Reg(new MicroOp())
val next_valid = WireInit(slot_valid)
val next_uop = WireInit(UpdateBrMask(io.brupdate, slot_uop))
val killed = IsKilledByBranch(io.brupdate, io.kill, slot_uop)
io.valid := slot_valid
io.out_uop := next_uop
io.will_be_valid := next_valid && !killed
when (io.kill) {
slot_valid := false.B
} .elsewhen (io.in_uop.valid) {
slot_valid := true.B
} .elsewhen (io.clear) {
slot_valid := false.B
} .otherwise {
slot_valid := next_valid && !killed
}
when (io.in_uop.valid) {
slot_uop := io.in_uop.bits
assert (!slot_valid || io.clear || io.kill)
} .otherwise {
slot_uop := next_uop
}
// Wakeups
next_uop.iw_p1_bypass_hint := false.B
next_uop.iw_p2_bypass_hint := false.B
next_uop.iw_p3_bypass_hint := false.B
next_uop.iw_p1_speculative_child := 0.U
next_uop.iw_p2_speculative_child := 0.U
val rebusied_prs1 = WireInit(false.B)
val rebusied_prs2 = WireInit(false.B)
val rebusied = rebusied_prs1 || rebusied_prs2
val prs1_matches = io.wakeup_ports.map { w => w.bits.uop.pdst === slot_uop.prs1 }
val prs2_matches = io.wakeup_ports.map { w => w.bits.uop.pdst === slot_uop.prs2 }
val prs3_matches = io.wakeup_ports.map { w => w.bits.uop.pdst === slot_uop.prs3 }
val prs1_wakeups = (io.wakeup_ports zip prs1_matches).map { case (w,m) => w.valid && m }
val prs2_wakeups = (io.wakeup_ports zip prs2_matches).map { case (w,m) => w.valid && m }
val prs3_wakeups = (io.wakeup_ports zip prs3_matches).map { case (w,m) => w.valid && m }
val prs1_rebusys = (io.wakeup_ports zip prs1_matches).map { case (w,m) => w.bits.rebusy && m }
val prs2_rebusys = (io.wakeup_ports zip prs2_matches).map { case (w,m) => w.bits.rebusy && m }
val bypassables = io.wakeup_ports.map { w => w.bits.bypassable }
val speculative_masks = io.wakeup_ports.map { w => w.bits.speculative_mask }
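  // A wakeup port broadcasts the pdst being written back; any busy source operand in this slot
  // that matches clears its busy bit and latches the port's bypass hint and speculative mask.
  // A later rebusy (or a child_rebusys hit on the latched speculative child) re-marks an integer
  // operand as busy, since its speculative wakeup did not pan out.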
when (prs1_wakeups.reduce(_||_)) {
next_uop.prs1_busy := false.B
next_uop.iw_p1_speculative_child := Mux1H(prs1_wakeups, speculative_masks)
next_uop.iw_p1_bypass_hint := Mux1H(prs1_wakeups, bypassables)
}
when ((prs1_rebusys.reduce(_||_) || ((io.child_rebusys & slot_uop.iw_p1_speculative_child) =/= 0.U)) &&
slot_uop.lrs1_rtype === RT_FIX) {
next_uop.prs1_busy := true.B
rebusied_prs1 := true.B
}
when (prs2_wakeups.reduce(_||_)) {
next_uop.prs2_busy := false.B
next_uop.iw_p2_speculative_child := Mux1H(prs2_wakeups, speculative_masks)
next_uop.iw_p2_bypass_hint := Mux1H(prs2_wakeups, bypassables)
}
when ((prs2_rebusys.reduce(_||_) || ((io.child_rebusys & slot_uop.iw_p2_speculative_child) =/= 0.U)) &&
slot_uop.lrs2_rtype === RT_FIX) {
next_uop.prs2_busy := true.B
rebusied_prs2 := true.B
}
when (prs3_wakeups.reduce(_||_)) {
next_uop.prs3_busy := false.B
next_uop.iw_p3_bypass_hint := Mux1H(prs3_wakeups, bypassables)
}
when (io.pred_wakeup_port.valid && io.pred_wakeup_port.bits === slot_uop.ppred) {
next_uop.ppred_busy := false.B
}
val iss_ready = !slot_uop.prs1_busy && !slot_uop.prs2_busy && !(slot_uop.ppred_busy && enableSFBOpt.B) && !(slot_uop.prs3_busy && isFp.B)
val agen_ready = (slot_uop.fu_code(FC_AGEN) && !slot_uop.prs1_busy && !(slot_uop.ppred_busy && enableSFBOpt.B) && isMem.B)
val dgen_ready = (slot_uop.fu_code(FC_DGEN) && !slot_uop.prs2_busy && !(slot_uop.ppred_busy && enableSFBOpt.B) && isMem.B)
io.request := slot_valid && !slot_uop.iw_issued && (
iss_ready || agen_ready || dgen_ready
)
io.iss_uop := slot_uop
// Update state for current micro-op based on grant
next_uop.iw_issued := false.B
next_uop.iw_issued_partial_agen := false.B
next_uop.iw_issued_partial_dgen := false.B
when (io.grant && !io.squash_grant) {
next_uop.iw_issued := true.B
}
if (isMem) {
when (slot_uop.fu_code(FC_AGEN) && slot_uop.fu_code(FC_DGEN)) {
when (agen_ready) {
// Issue the AGEN, next slot entry is a DGEN
when (io.grant && !io.squash_grant) {
next_uop.iw_issued_partial_agen := true.B
}
io.iss_uop.fu_code(FC_AGEN) := true.B
io.iss_uop.fu_code(FC_DGEN) := false.B
} .otherwise {
// Issue the DGEN, next slot entry is the AGEN
when (io.grant && !io.squash_grant) {
next_uop.iw_issued_partial_dgen := true.B
}
io.iss_uop.fu_code(FC_AGEN) := false.B
io.iss_uop.fu_code(FC_DGEN) := true.B
io.iss_uop.imm_sel := IS_N
io.iss_uop.prs1 := slot_uop.prs2
io.iss_uop.lrs1_rtype := slot_uop.lrs2_rtype
io.iss_uop.iw_p1_bypass_hint := slot_uop.iw_p2_bypass_hint
}
} .elsewhen (slot_uop.fu_code(FC_DGEN)) {
io.iss_uop.imm_sel := IS_N
io.iss_uop.prs1 := slot_uop.prs2
io.iss_uop.lrs1_rtype := slot_uop.lrs2_rtype
io.iss_uop.iw_p1_bypass_hint := slot_uop.iw_p2_bypass_hint
}
io.iss_uop.lrs2_rtype := RT_X
io.iss_uop.prs2 := io.iss_uop.prs1 // helps with DCE
}
when (slot_valid && slot_uop.iw_issued) {
next_valid := rebusied
if (isMem) {
when (slot_uop.iw_issued_partial_agen) {
next_valid := true.B
when (!rebusied_prs1) {
next_uop.fu_code(FC_AGEN) := false.B
next_uop.fu_code(FC_DGEN) := true.B
}
} .elsewhen (slot_uop.iw_issued_partial_dgen) {
next_valid := true.B
when (!rebusied_prs2) {
next_uop.fu_code(FC_AGEN) := true.B
next_uop.fu_code(FC_DGEN) := false.B
}
}
}
}
}
| module IssueSlot_61( // @[issue-slot.scala:49:7]
input clock, // @[issue-slot.scala:49:7]
input reset, // @[issue-slot.scala:49:7]
output io_valid, // @[issue-slot.scala:52:14]
output io_will_be_valid, // @[issue-slot.scala:52:14]
output io_request, // @[issue-slot.scala:52:14]
input io_grant, // @[issue-slot.scala:52:14]
output [31:0] io_iss_uop_inst, // @[issue-slot.scala:52:14]
output [31:0] io_iss_uop_debug_inst, // @[issue-slot.scala:52:14]
output io_iss_uop_is_rvc, // @[issue-slot.scala:52:14]
output [39:0] io_iss_uop_debug_pc, // @[issue-slot.scala:52:14]
output io_iss_uop_iq_type_0, // @[issue-slot.scala:52:14]
output io_iss_uop_iq_type_1, // @[issue-slot.scala:52:14]
output io_iss_uop_iq_type_2, // @[issue-slot.scala:52:14]
output io_iss_uop_iq_type_3, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_0, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_1, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_2, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_3, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_4, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_5, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_6, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_7, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_8, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_9, // @[issue-slot.scala:52:14]
output io_iss_uop_iw_issued, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
output io_iss_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
output io_iss_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
output io_iss_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_dis_col_sel, // @[issue-slot.scala:52:14]
output [15:0] io_iss_uop_br_mask, // @[issue-slot.scala:52:14]
output [3:0] io_iss_uop_br_tag, // @[issue-slot.scala:52:14]
output [3:0] io_iss_uop_br_type, // @[issue-slot.scala:52:14]
output io_iss_uop_is_sfb, // @[issue-slot.scala:52:14]
output io_iss_uop_is_fence, // @[issue-slot.scala:52:14]
output io_iss_uop_is_fencei, // @[issue-slot.scala:52:14]
output io_iss_uop_is_sfence, // @[issue-slot.scala:52:14]
output io_iss_uop_is_amo, // @[issue-slot.scala:52:14]
output io_iss_uop_is_eret, // @[issue-slot.scala:52:14]
output io_iss_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
output io_iss_uop_is_rocc, // @[issue-slot.scala:52:14]
output io_iss_uop_is_mov, // @[issue-slot.scala:52:14]
output [4:0] io_iss_uop_ftq_idx, // @[issue-slot.scala:52:14]
output io_iss_uop_edge_inst, // @[issue-slot.scala:52:14]
output [5:0] io_iss_uop_pc_lob, // @[issue-slot.scala:52:14]
output io_iss_uop_taken, // @[issue-slot.scala:52:14]
output io_iss_uop_imm_rename, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_imm_sel, // @[issue-slot.scala:52:14]
output [4:0] io_iss_uop_pimm, // @[issue-slot.scala:52:14]
output [19:0] io_iss_uop_imm_packed, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_op1_sel, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_op2_sel, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
output [6:0] io_iss_uop_rob_idx, // @[issue-slot.scala:52:14]
output [4:0] io_iss_uop_ldq_idx, // @[issue-slot.scala:52:14]
output [4:0] io_iss_uop_stq_idx, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_rxq_idx, // @[issue-slot.scala:52:14]
output [6:0] io_iss_uop_pdst, // @[issue-slot.scala:52:14]
output [6:0] io_iss_uop_prs1, // @[issue-slot.scala:52:14]
output [6:0] io_iss_uop_prs2, // @[issue-slot.scala:52:14]
output [6:0] io_iss_uop_prs3, // @[issue-slot.scala:52:14]
output [4:0] io_iss_uop_ppred, // @[issue-slot.scala:52:14]
output io_iss_uop_prs1_busy, // @[issue-slot.scala:52:14]
output io_iss_uop_prs2_busy, // @[issue-slot.scala:52:14]
output io_iss_uop_prs3_busy, // @[issue-slot.scala:52:14]
output io_iss_uop_ppred_busy, // @[issue-slot.scala:52:14]
output [6:0] io_iss_uop_stale_pdst, // @[issue-slot.scala:52:14]
output io_iss_uop_exception, // @[issue-slot.scala:52:14]
output [63:0] io_iss_uop_exc_cause, // @[issue-slot.scala:52:14]
output [4:0] io_iss_uop_mem_cmd, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_mem_size, // @[issue-slot.scala:52:14]
output io_iss_uop_mem_signed, // @[issue-slot.scala:52:14]
output io_iss_uop_uses_ldq, // @[issue-slot.scala:52:14]
output io_iss_uop_uses_stq, // @[issue-slot.scala:52:14]
output io_iss_uop_is_unique, // @[issue-slot.scala:52:14]
output io_iss_uop_flush_on_commit, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_csr_cmd, // @[issue-slot.scala:52:14]
output io_iss_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
output [5:0] io_iss_uop_ldst, // @[issue-slot.scala:52:14]
output [5:0] io_iss_uop_lrs1, // @[issue-slot.scala:52:14]
output [5:0] io_iss_uop_lrs2, // @[issue-slot.scala:52:14]
output [5:0] io_iss_uop_lrs3, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_dst_rtype, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
output io_iss_uop_frs3_en, // @[issue-slot.scala:52:14]
output io_iss_uop_fcn_dw, // @[issue-slot.scala:52:14]
output [4:0] io_iss_uop_fcn_op, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_val, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_fp_rm, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_fp_typ, // @[issue-slot.scala:52:14]
output io_iss_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
output io_iss_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
output io_iss_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
output io_iss_uop_bp_debug_if, // @[issue-slot.scala:52:14]
output io_iss_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_debug_fsrc, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_debug_tsrc, // @[issue-slot.scala:52:14]
input io_in_uop_valid, // @[issue-slot.scala:52:14]
input [31:0] io_in_uop_bits_inst, // @[issue-slot.scala:52:14]
input [31:0] io_in_uop_bits_debug_inst, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_rvc, // @[issue-slot.scala:52:14]
input [39:0] io_in_uop_bits_debug_pc, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iq_type_0, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iq_type_1, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iq_type_2, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iq_type_3, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_0, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_1, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_2, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_3, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_4, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_5, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_6, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_7, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_8, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_9, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iw_issued, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_dis_col_sel, // @[issue-slot.scala:52:14]
input [15:0] io_in_uop_bits_br_mask, // @[issue-slot.scala:52:14]
input [3:0] io_in_uop_bits_br_tag, // @[issue-slot.scala:52:14]
input [3:0] io_in_uop_bits_br_type, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_sfb, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_fence, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_fencei, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_sfence, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_amo, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_eret, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_sys_pc2epc, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_rocc, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_mov, // @[issue-slot.scala:52:14]
input [4:0] io_in_uop_bits_ftq_idx, // @[issue-slot.scala:52:14]
input io_in_uop_bits_edge_inst, // @[issue-slot.scala:52:14]
input [5:0] io_in_uop_bits_pc_lob, // @[issue-slot.scala:52:14]
input io_in_uop_bits_taken, // @[issue-slot.scala:52:14]
input io_in_uop_bits_imm_rename, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_imm_sel, // @[issue-slot.scala:52:14]
input [4:0] io_in_uop_bits_pimm, // @[issue-slot.scala:52:14]
input [19:0] io_in_uop_bits_imm_packed, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_op1_sel, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_op2_sel, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_wen, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_toint, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_fma, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_div, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_vec, // @[issue-slot.scala:52:14]
input [6:0] io_in_uop_bits_rob_idx, // @[issue-slot.scala:52:14]
input [4:0] io_in_uop_bits_ldq_idx, // @[issue-slot.scala:52:14]
input [4:0] io_in_uop_bits_stq_idx, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_rxq_idx, // @[issue-slot.scala:52:14]
input [6:0] io_in_uop_bits_pdst, // @[issue-slot.scala:52:14]
input [6:0] io_in_uop_bits_prs1, // @[issue-slot.scala:52:14]
input [6:0] io_in_uop_bits_prs2, // @[issue-slot.scala:52:14]
input [6:0] io_in_uop_bits_prs3, // @[issue-slot.scala:52:14]
input [4:0] io_in_uop_bits_ppred, // @[issue-slot.scala:52:14]
input io_in_uop_bits_prs1_busy, // @[issue-slot.scala:52:14]
input io_in_uop_bits_prs2_busy, // @[issue-slot.scala:52:14]
input io_in_uop_bits_prs3_busy, // @[issue-slot.scala:52:14]
input io_in_uop_bits_ppred_busy, // @[issue-slot.scala:52:14]
input [6:0] io_in_uop_bits_stale_pdst, // @[issue-slot.scala:52:14]
input io_in_uop_bits_exception, // @[issue-slot.scala:52:14]
input [63:0] io_in_uop_bits_exc_cause, // @[issue-slot.scala:52:14]
input [4:0] io_in_uop_bits_mem_cmd, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_mem_size, // @[issue-slot.scala:52:14]
input io_in_uop_bits_mem_signed, // @[issue-slot.scala:52:14]
input io_in_uop_bits_uses_ldq, // @[issue-slot.scala:52:14]
input io_in_uop_bits_uses_stq, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_unique, // @[issue-slot.scala:52:14]
input io_in_uop_bits_flush_on_commit, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_csr_cmd, // @[issue-slot.scala:52:14]
input io_in_uop_bits_ldst_is_rs1, // @[issue-slot.scala:52:14]
input [5:0] io_in_uop_bits_ldst, // @[issue-slot.scala:52:14]
input [5:0] io_in_uop_bits_lrs1, // @[issue-slot.scala:52:14]
input [5:0] io_in_uop_bits_lrs2, // @[issue-slot.scala:52:14]
input [5:0] io_in_uop_bits_lrs3, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_dst_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_lrs1_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_lrs2_rtype, // @[issue-slot.scala:52:14]
input io_in_uop_bits_frs3_en, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fcn_dw, // @[issue-slot.scala:52:14]
input [4:0] io_in_uop_bits_fcn_op, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_val, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_fp_rm, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_fp_typ, // @[issue-slot.scala:52:14]
input io_in_uop_bits_xcpt_pf_if, // @[issue-slot.scala:52:14]
input io_in_uop_bits_xcpt_ae_if, // @[issue-slot.scala:52:14]
input io_in_uop_bits_xcpt_ma_if, // @[issue-slot.scala:52:14]
input io_in_uop_bits_bp_debug_if, // @[issue-slot.scala:52:14]
input io_in_uop_bits_bp_xcpt_if, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_debug_fsrc, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_debug_tsrc, // @[issue-slot.scala:52:14]
output [31:0] io_out_uop_inst, // @[issue-slot.scala:52:14]
output [31:0] io_out_uop_debug_inst, // @[issue-slot.scala:52:14]
output io_out_uop_is_rvc, // @[issue-slot.scala:52:14]
output [39:0] io_out_uop_debug_pc, // @[issue-slot.scala:52:14]
output io_out_uop_iq_type_0, // @[issue-slot.scala:52:14]
output io_out_uop_iq_type_1, // @[issue-slot.scala:52:14]
output io_out_uop_iq_type_2, // @[issue-slot.scala:52:14]
output io_out_uop_iq_type_3, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_0, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_1, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_2, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_3, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_4, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_5, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_6, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_7, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_8, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_9, // @[issue-slot.scala:52:14]
output io_out_uop_iw_issued, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
output io_out_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
output io_out_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
output io_out_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_dis_col_sel, // @[issue-slot.scala:52:14]
output [15:0] io_out_uop_br_mask, // @[issue-slot.scala:52:14]
output [3:0] io_out_uop_br_tag, // @[issue-slot.scala:52:14]
output [3:0] io_out_uop_br_type, // @[issue-slot.scala:52:14]
output io_out_uop_is_sfb, // @[issue-slot.scala:52:14]
output io_out_uop_is_fence, // @[issue-slot.scala:52:14]
output io_out_uop_is_fencei, // @[issue-slot.scala:52:14]
output io_out_uop_is_sfence, // @[issue-slot.scala:52:14]
output io_out_uop_is_amo, // @[issue-slot.scala:52:14]
output io_out_uop_is_eret, // @[issue-slot.scala:52:14]
output io_out_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
output io_out_uop_is_rocc, // @[issue-slot.scala:52:14]
output io_out_uop_is_mov, // @[issue-slot.scala:52:14]
output [4:0] io_out_uop_ftq_idx, // @[issue-slot.scala:52:14]
output io_out_uop_edge_inst, // @[issue-slot.scala:52:14]
output [5:0] io_out_uop_pc_lob, // @[issue-slot.scala:52:14]
output io_out_uop_taken, // @[issue-slot.scala:52:14]
output io_out_uop_imm_rename, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_imm_sel, // @[issue-slot.scala:52:14]
output [4:0] io_out_uop_pimm, // @[issue-slot.scala:52:14]
output [19:0] io_out_uop_imm_packed, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_op1_sel, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_op2_sel, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
output [6:0] io_out_uop_rob_idx, // @[issue-slot.scala:52:14]
output [4:0] io_out_uop_ldq_idx, // @[issue-slot.scala:52:14]
output [4:0] io_out_uop_stq_idx, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_rxq_idx, // @[issue-slot.scala:52:14]
output [6:0] io_out_uop_pdst, // @[issue-slot.scala:52:14]
output [6:0] io_out_uop_prs1, // @[issue-slot.scala:52:14]
output [6:0] io_out_uop_prs2, // @[issue-slot.scala:52:14]
output [6:0] io_out_uop_prs3, // @[issue-slot.scala:52:14]
output [4:0] io_out_uop_ppred, // @[issue-slot.scala:52:14]
output io_out_uop_prs1_busy, // @[issue-slot.scala:52:14]
output io_out_uop_prs2_busy, // @[issue-slot.scala:52:14]
output io_out_uop_prs3_busy, // @[issue-slot.scala:52:14]
output io_out_uop_ppred_busy, // @[issue-slot.scala:52:14]
output [6:0] io_out_uop_stale_pdst, // @[issue-slot.scala:52:14]
output io_out_uop_exception, // @[issue-slot.scala:52:14]
output [63:0] io_out_uop_exc_cause, // @[issue-slot.scala:52:14]
output [4:0] io_out_uop_mem_cmd, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_mem_size, // @[issue-slot.scala:52:14]
output io_out_uop_mem_signed, // @[issue-slot.scala:52:14]
output io_out_uop_uses_ldq, // @[issue-slot.scala:52:14]
output io_out_uop_uses_stq, // @[issue-slot.scala:52:14]
output io_out_uop_is_unique, // @[issue-slot.scala:52:14]
output io_out_uop_flush_on_commit, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_csr_cmd, // @[issue-slot.scala:52:14]
output io_out_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
output [5:0] io_out_uop_ldst, // @[issue-slot.scala:52:14]
output [5:0] io_out_uop_lrs1, // @[issue-slot.scala:52:14]
output [5:0] io_out_uop_lrs2, // @[issue-slot.scala:52:14]
output [5:0] io_out_uop_lrs3, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_dst_rtype, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
output io_out_uop_frs3_en, // @[issue-slot.scala:52:14]
output io_out_uop_fcn_dw, // @[issue-slot.scala:52:14]
output [4:0] io_out_uop_fcn_op, // @[issue-slot.scala:52:14]
output io_out_uop_fp_val, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_fp_rm, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_fp_typ, // @[issue-slot.scala:52:14]
output io_out_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
output io_out_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
output io_out_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
output io_out_uop_bp_debug_if, // @[issue-slot.scala:52:14]
output io_out_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_debug_fsrc, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_debug_tsrc, // @[issue-slot.scala:52:14]
input [15:0] io_brupdate_b1_resolve_mask, // @[issue-slot.scala:52:14]
input [15:0] io_brupdate_b1_mispredict_mask, // @[issue-slot.scala:52:14]
input [31:0] io_brupdate_b2_uop_inst, // @[issue-slot.scala:52:14]
input [31:0] io_brupdate_b2_uop_debug_inst, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_rvc, // @[issue-slot.scala:52:14]
input [39:0] io_brupdate_b2_uop_debug_pc, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iq_type_0, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iq_type_1, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iq_type_2, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iq_type_3, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_0, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_1, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_2, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_3, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_4, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_5, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_6, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_7, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_8, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_9, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iw_issued, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iw_issued_partial_agen, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iw_issued_partial_dgen, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_dis_col_sel, // @[issue-slot.scala:52:14]
input [15:0] io_brupdate_b2_uop_br_mask, // @[issue-slot.scala:52:14]
input [3:0] io_brupdate_b2_uop_br_tag, // @[issue-slot.scala:52:14]
input [3:0] io_brupdate_b2_uop_br_type, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_sfb, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_fence, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_fencei, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_sfence, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_amo, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_eret, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_rocc, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_mov, // @[issue-slot.scala:52:14]
input [4:0] io_brupdate_b2_uop_ftq_idx, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_edge_inst, // @[issue-slot.scala:52:14]
input [5:0] io_brupdate_b2_uop_pc_lob, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_taken, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_imm_rename, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_imm_sel, // @[issue-slot.scala:52:14]
input [4:0] io_brupdate_b2_uop_pimm, // @[issue-slot.scala:52:14]
input [19:0] io_brupdate_b2_uop_imm_packed, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_op1_sel, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_op2_sel, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
input [6:0] io_brupdate_b2_uop_rob_idx, // @[issue-slot.scala:52:14]
input [4:0] io_brupdate_b2_uop_ldq_idx, // @[issue-slot.scala:52:14]
input [4:0] io_brupdate_b2_uop_stq_idx, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_rxq_idx, // @[issue-slot.scala:52:14]
input [6:0] io_brupdate_b2_uop_pdst, // @[issue-slot.scala:52:14]
input [6:0] io_brupdate_b2_uop_prs1, // @[issue-slot.scala:52:14]
input [6:0] io_brupdate_b2_uop_prs2, // @[issue-slot.scala:52:14]
input [6:0] io_brupdate_b2_uop_prs3, // @[issue-slot.scala:52:14]
input [4:0] io_brupdate_b2_uop_ppred, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_prs1_busy, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_prs2_busy, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_prs3_busy, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_ppred_busy, // @[issue-slot.scala:52:14]
input [6:0] io_brupdate_b2_uop_stale_pdst, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_exception, // @[issue-slot.scala:52:14]
input [63:0] io_brupdate_b2_uop_exc_cause, // @[issue-slot.scala:52:14]
input [4:0] io_brupdate_b2_uop_mem_cmd, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_mem_size, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_mem_signed, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_uses_ldq, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_uses_stq, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_unique, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_flush_on_commit, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_csr_cmd, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
input [5:0] io_brupdate_b2_uop_ldst, // @[issue-slot.scala:52:14]
input [5:0] io_brupdate_b2_uop_lrs1, // @[issue-slot.scala:52:14]
input [5:0] io_brupdate_b2_uop_lrs2, // @[issue-slot.scala:52:14]
input [5:0] io_brupdate_b2_uop_lrs3, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_dst_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_frs3_en, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fcn_dw, // @[issue-slot.scala:52:14]
input [4:0] io_brupdate_b2_uop_fcn_op, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_val, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_fp_rm, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_fp_typ, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_bp_debug_if, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_debug_fsrc, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_debug_tsrc, // @[issue-slot.scala:52:14]
input io_brupdate_b2_mispredict, // @[issue-slot.scala:52:14]
input io_brupdate_b2_taken, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_cfi_type, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_pc_sel, // @[issue-slot.scala:52:14]
input [39:0] io_brupdate_b2_jalr_target, // @[issue-slot.scala:52:14]
input [20:0] io_brupdate_b2_target_offset, // @[issue-slot.scala:52:14]
input io_kill, // @[issue-slot.scala:52:14]
input io_clear, // @[issue-slot.scala:52:14]
input io_squash_grant, // @[issue-slot.scala:52:14]
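  // Wakeup ports follow: each valid port broadcasts a completing uop so the slot can update
  // operand readiness; port 0 additionally carries bypassable, speculative_mask, and rebusy.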
input io_wakeup_ports_0_valid, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_0_bits_uop_inst, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_0_bits_uop_debug_inst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_rvc, // @[issue-slot.scala:52:14]
input [39:0] io_wakeup_ports_0_bits_uop_debug_pc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iq_type_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iq_type_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iq_type_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iq_type_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_4, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_5, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_6, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_7, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_8, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_9, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iw_issued, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iw_issued_partial_agen, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iw_issued_partial_dgen, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_dis_col_sel, // @[issue-slot.scala:52:14]
input [15:0] io_wakeup_ports_0_bits_uop_br_mask, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_0_bits_uop_br_tag, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_0_bits_uop_br_type, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_sfb, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_fence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_fencei, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_sfence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_amo, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_eret, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_rocc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_mov, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_0_bits_uop_ftq_idx, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_edge_inst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_0_bits_uop_pc_lob, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_taken, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_imm_rename, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_imm_sel, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_0_bits_uop_pimm, // @[issue-slot.scala:52:14]
input [19:0] io_wakeup_ports_0_bits_uop_imm_packed, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_op1_sel, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_op2_sel, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_0_bits_uop_rob_idx, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_0_bits_uop_ldq_idx, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_0_bits_uop_stq_idx, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_rxq_idx, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_0_bits_uop_pdst, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_0_bits_uop_prs1, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_0_bits_uop_prs2, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_0_bits_uop_prs3, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_0_bits_uop_ppred, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_prs1_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_prs2_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_prs3_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_ppred_busy, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_0_bits_uop_stale_pdst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_exception, // @[issue-slot.scala:52:14]
input [63:0] io_wakeup_ports_0_bits_uop_exc_cause, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_0_bits_uop_mem_cmd, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_mem_size, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_mem_signed, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_uses_ldq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_uses_stq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_unique, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_flush_on_commit, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_csr_cmd, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_0_bits_uop_ldst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_0_bits_uop_lrs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_0_bits_uop_lrs2, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_0_bits_uop_lrs3, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_dst_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_frs3_en, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fcn_dw, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_0_bits_uop_fcn_op, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_val, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_fp_rm, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_fp_typ, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_bp_debug_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_debug_fsrc, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_debug_tsrc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_bypassable, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_speculative_mask, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_rebusy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_valid, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_1_bits_uop_inst, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_1_bits_uop_debug_inst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_rvc, // @[issue-slot.scala:52:14]
input [39:0] io_wakeup_ports_1_bits_uop_debug_pc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iq_type_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iq_type_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iq_type_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iq_type_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_4, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_5, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_6, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_7, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_8, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_9, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iw_issued, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iw_issued_partial_agen, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iw_issued_partial_dgen, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_dis_col_sel, // @[issue-slot.scala:52:14]
input [15:0] io_wakeup_ports_1_bits_uop_br_mask, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_1_bits_uop_br_tag, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_1_bits_uop_br_type, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_sfb, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_fence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_fencei, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_sfence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_amo, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_eret, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_rocc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_mov, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_1_bits_uop_ftq_idx, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_edge_inst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_1_bits_uop_pc_lob, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_taken, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_imm_rename, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_imm_sel, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_1_bits_uop_pimm, // @[issue-slot.scala:52:14]
input [19:0] io_wakeup_ports_1_bits_uop_imm_packed, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_op1_sel, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_op2_sel, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_1_bits_uop_rob_idx, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_1_bits_uop_ldq_idx, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_1_bits_uop_stq_idx, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_rxq_idx, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_1_bits_uop_pdst, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_1_bits_uop_prs1, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_1_bits_uop_prs2, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_1_bits_uop_prs3, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_1_bits_uop_ppred, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_prs1_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_prs2_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_prs3_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_ppred_busy, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_1_bits_uop_stale_pdst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_exception, // @[issue-slot.scala:52:14]
input [63:0] io_wakeup_ports_1_bits_uop_exc_cause, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_1_bits_uop_mem_cmd, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_mem_size, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_mem_signed, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_uses_ldq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_uses_stq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_unique, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_flush_on_commit, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_csr_cmd, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_1_bits_uop_ldst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_1_bits_uop_lrs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_1_bits_uop_lrs2, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_1_bits_uop_lrs3, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_dst_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_frs3_en, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fcn_dw, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_1_bits_uop_fcn_op, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_val, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_fp_rm, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_fp_typ, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_bp_debug_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_debug_fsrc, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_debug_tsrc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_valid, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_2_bits_uop_inst, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_2_bits_uop_debug_inst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_rvc, // @[issue-slot.scala:52:14]
input [39:0] io_wakeup_ports_2_bits_uop_debug_pc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iq_type_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iq_type_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iq_type_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iq_type_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_4, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_5, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_6, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_7, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_8, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_9, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iw_issued, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_dis_col_sel, // @[issue-slot.scala:52:14]
input [15:0] io_wakeup_ports_2_bits_uop_br_mask, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_2_bits_uop_br_tag, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_2_bits_uop_br_type, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_sfb, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_fence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_fencei, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_sfence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_amo, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_eret, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_rocc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_mov, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_2_bits_uop_ftq_idx, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_edge_inst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_2_bits_uop_pc_lob, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_taken, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_imm_rename, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_imm_sel, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_2_bits_uop_pimm, // @[issue-slot.scala:52:14]
input [19:0] io_wakeup_ports_2_bits_uop_imm_packed, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_op1_sel, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_op2_sel, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_2_bits_uop_rob_idx, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_2_bits_uop_ldq_idx, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_2_bits_uop_stq_idx, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_rxq_idx, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_2_bits_uop_pdst, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_2_bits_uop_prs1, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_2_bits_uop_prs2, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_2_bits_uop_prs3, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_2_bits_uop_ppred, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_prs1_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_prs2_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_prs3_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_ppred_busy, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_2_bits_uop_stale_pdst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_exception, // @[issue-slot.scala:52:14]
input [63:0] io_wakeup_ports_2_bits_uop_exc_cause, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_2_bits_uop_mem_cmd, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_mem_size, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_mem_signed, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_uses_ldq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_uses_stq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_unique, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_flush_on_commit, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_csr_cmd, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_2_bits_uop_ldst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_2_bits_uop_lrs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_2_bits_uop_lrs2, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_2_bits_uop_lrs3, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_dst_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_frs3_en, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fcn_dw, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_2_bits_uop_fcn_op, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_val, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_fp_rm, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_fp_typ, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_bp_debug_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_debug_fsrc, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_debug_tsrc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_valid, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_3_bits_uop_inst, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_3_bits_uop_debug_inst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_rvc, // @[issue-slot.scala:52:14]
input [39:0] io_wakeup_ports_3_bits_uop_debug_pc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iq_type_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iq_type_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iq_type_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iq_type_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_4, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_5, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_6, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_7, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_8, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_9, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iw_issued, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_dis_col_sel, // @[issue-slot.scala:52:14]
input [15:0] io_wakeup_ports_3_bits_uop_br_mask, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_3_bits_uop_br_tag, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_3_bits_uop_br_type, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_sfb, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_fence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_fencei, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_sfence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_amo, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_eret, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_rocc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_mov, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_3_bits_uop_ftq_idx, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_edge_inst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_3_bits_uop_pc_lob, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_taken, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_imm_rename, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_imm_sel, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_3_bits_uop_pimm, // @[issue-slot.scala:52:14]
input [19:0] io_wakeup_ports_3_bits_uop_imm_packed, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_op1_sel, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_op2_sel, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_3_bits_uop_rob_idx, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_3_bits_uop_ldq_idx, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_3_bits_uop_stq_idx, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_rxq_idx, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_3_bits_uop_pdst, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_3_bits_uop_prs1, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_3_bits_uop_prs2, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_3_bits_uop_prs3, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_3_bits_uop_ppred, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_prs1_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_prs2_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_prs3_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_ppred_busy, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_3_bits_uop_stale_pdst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_exception, // @[issue-slot.scala:52:14]
input [63:0] io_wakeup_ports_3_bits_uop_exc_cause, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_3_bits_uop_mem_cmd, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_mem_size, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_mem_signed, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_uses_ldq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_uses_stq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_unique, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_flush_on_commit, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_csr_cmd, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_3_bits_uop_ldst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_3_bits_uop_lrs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_3_bits_uop_lrs2, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_3_bits_uop_lrs3, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_dst_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_frs3_en, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fcn_dw, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_3_bits_uop_fcn_op, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_val, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_fp_rm, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_fp_typ, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_bp_debug_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_debug_fsrc, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_debug_tsrc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_valid, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_4_bits_uop_inst, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_4_bits_uop_debug_inst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_rvc, // @[issue-slot.scala:52:14]
input [39:0] io_wakeup_ports_4_bits_uop_debug_pc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_iq_type_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_iq_type_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_iq_type_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_iq_type_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fu_code_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fu_code_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fu_code_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fu_code_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fu_code_4, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fu_code_5, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fu_code_6, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fu_code_7, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fu_code_8, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fu_code_9, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_iw_issued, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_4_bits_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_4_bits_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_4_bits_uop_dis_col_sel, // @[issue-slot.scala:52:14]
input [15:0] io_wakeup_ports_4_bits_uop_br_mask, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_4_bits_uop_br_tag, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_4_bits_uop_br_type, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_sfb, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_fence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_fencei, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_sfence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_amo, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_eret, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_rocc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_mov, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_4_bits_uop_ftq_idx, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_edge_inst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_4_bits_uop_pc_lob, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_taken, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_imm_rename, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_4_bits_uop_imm_sel, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_4_bits_uop_pimm, // @[issue-slot.scala:52:14]
input [19:0] io_wakeup_ports_4_bits_uop_imm_packed, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_4_bits_uop_op1_sel, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_4_bits_uop_op2_sel, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_4_bits_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_4_bits_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_4_bits_uop_rob_idx, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_4_bits_uop_ldq_idx, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_4_bits_uop_stq_idx, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_4_bits_uop_rxq_idx, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_4_bits_uop_pdst, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_4_bits_uop_prs1, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_4_bits_uop_prs2, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_4_bits_uop_prs3, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_4_bits_uop_ppred, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_prs1_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_prs2_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_prs3_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_ppred_busy, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_4_bits_uop_stale_pdst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_exception, // @[issue-slot.scala:52:14]
input [63:0] io_wakeup_ports_4_bits_uop_exc_cause, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_4_bits_uop_mem_cmd, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_4_bits_uop_mem_size, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_mem_signed, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_uses_ldq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_uses_stq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_unique, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_flush_on_commit, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_4_bits_uop_csr_cmd, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_4_bits_uop_ldst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_4_bits_uop_lrs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_4_bits_uop_lrs2, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_4_bits_uop_lrs3, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_4_bits_uop_dst_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_4_bits_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_4_bits_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_frs3_en, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fcn_dw, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_4_bits_uop_fcn_op, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_val, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_4_bits_uop_fp_rm, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_4_bits_uop_fp_typ, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_bp_debug_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_4_bits_uop_debug_fsrc, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_4_bits_uop_debug_tsrc, // @[issue-slot.scala:52:14]
input io_pred_wakeup_port_valid, // @[issue-slot.scala:52:14]
input [4:0] io_pred_wakeup_port_bits, // @[issue-slot.scala:52:14]
input [2:0] io_child_rebusys // @[issue-slot.scala:52:14]
);
wire [15:0] next_uop_out_br_mask; // @[util.scala:104:23]
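  // Each io_* port is mirrored onto an identically named wire with an _0 suffix; these are
  // generated pass-through aliases of the port signals used by the slot logic below.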
wire io_grant_0 = io_grant; // @[issue-slot.scala:49:7]
wire io_in_uop_valid_0 = io_in_uop_valid; // @[issue-slot.scala:49:7]
wire [31:0] io_in_uop_bits_inst_0 = io_in_uop_bits_inst; // @[issue-slot.scala:49:7]
wire [31:0] io_in_uop_bits_debug_inst_0 = io_in_uop_bits_debug_inst; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_rvc_0 = io_in_uop_bits_is_rvc; // @[issue-slot.scala:49:7]
wire [39:0] io_in_uop_bits_debug_pc_0 = io_in_uop_bits_debug_pc; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iq_type_0_0 = io_in_uop_bits_iq_type_0; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iq_type_1_0 = io_in_uop_bits_iq_type_1; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iq_type_2_0 = io_in_uop_bits_iq_type_2; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iq_type_3_0 = io_in_uop_bits_iq_type_3; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_0_0 = io_in_uop_bits_fu_code_0; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_1_0 = io_in_uop_bits_fu_code_1; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_2_0 = io_in_uop_bits_fu_code_2; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_3_0 = io_in_uop_bits_fu_code_3; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_4_0 = io_in_uop_bits_fu_code_4; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_5_0 = io_in_uop_bits_fu_code_5; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_6_0 = io_in_uop_bits_fu_code_6; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_7_0 = io_in_uop_bits_fu_code_7; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_8_0 = io_in_uop_bits_fu_code_8; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_9_0 = io_in_uop_bits_fu_code_9; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iw_issued_0 = io_in_uop_bits_iw_issued; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_iw_p1_speculative_child_0 = io_in_uop_bits_iw_p1_speculative_child; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_iw_p2_speculative_child_0 = io_in_uop_bits_iw_p2_speculative_child; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iw_p1_bypass_hint_0 = io_in_uop_bits_iw_p1_bypass_hint; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iw_p2_bypass_hint_0 = io_in_uop_bits_iw_p2_bypass_hint; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iw_p3_bypass_hint_0 = io_in_uop_bits_iw_p3_bypass_hint; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_dis_col_sel_0 = io_in_uop_bits_dis_col_sel; // @[issue-slot.scala:49:7]
wire [15:0] io_in_uop_bits_br_mask_0 = io_in_uop_bits_br_mask; // @[issue-slot.scala:49:7]
wire [3:0] io_in_uop_bits_br_tag_0 = io_in_uop_bits_br_tag; // @[issue-slot.scala:49:7]
wire [3:0] io_in_uop_bits_br_type_0 = io_in_uop_bits_br_type; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_sfb_0 = io_in_uop_bits_is_sfb; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_fence_0 = io_in_uop_bits_is_fence; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_fencei_0 = io_in_uop_bits_is_fencei; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_sfence_0 = io_in_uop_bits_is_sfence; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_amo_0 = io_in_uop_bits_is_amo; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_eret_0 = io_in_uop_bits_is_eret; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_sys_pc2epc_0 = io_in_uop_bits_is_sys_pc2epc; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_rocc_0 = io_in_uop_bits_is_rocc; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_mov_0 = io_in_uop_bits_is_mov; // @[issue-slot.scala:49:7]
wire [4:0] io_in_uop_bits_ftq_idx_0 = io_in_uop_bits_ftq_idx; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_edge_inst_0 = io_in_uop_bits_edge_inst; // @[issue-slot.scala:49:7]
wire [5:0] io_in_uop_bits_pc_lob_0 = io_in_uop_bits_pc_lob; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_taken_0 = io_in_uop_bits_taken; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_imm_rename_0 = io_in_uop_bits_imm_rename; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_imm_sel_0 = io_in_uop_bits_imm_sel; // @[issue-slot.scala:49:7]
wire [4:0] io_in_uop_bits_pimm_0 = io_in_uop_bits_pimm; // @[issue-slot.scala:49:7]
wire [19:0] io_in_uop_bits_imm_packed_0 = io_in_uop_bits_imm_packed; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_op1_sel_0 = io_in_uop_bits_op1_sel; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_op2_sel_0 = io_in_uop_bits_op2_sel; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_ldst_0 = io_in_uop_bits_fp_ctrl_ldst; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_wen_0 = io_in_uop_bits_fp_ctrl_wen; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_ren1_0 = io_in_uop_bits_fp_ctrl_ren1; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_ren2_0 = io_in_uop_bits_fp_ctrl_ren2; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_ren3_0 = io_in_uop_bits_fp_ctrl_ren3; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_swap12_0 = io_in_uop_bits_fp_ctrl_swap12; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_swap23_0 = io_in_uop_bits_fp_ctrl_swap23; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_fp_ctrl_typeTagIn_0 = io_in_uop_bits_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_fp_ctrl_typeTagOut_0 = io_in_uop_bits_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_fromint_0 = io_in_uop_bits_fp_ctrl_fromint; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_toint_0 = io_in_uop_bits_fp_ctrl_toint; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_fastpipe_0 = io_in_uop_bits_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_fma_0 = io_in_uop_bits_fp_ctrl_fma; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_div_0 = io_in_uop_bits_fp_ctrl_div; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_sqrt_0 = io_in_uop_bits_fp_ctrl_sqrt; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_wflags_0 = io_in_uop_bits_fp_ctrl_wflags; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_vec_0 = io_in_uop_bits_fp_ctrl_vec; // @[issue-slot.scala:49:7]
wire [6:0] io_in_uop_bits_rob_idx_0 = io_in_uop_bits_rob_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_in_uop_bits_ldq_idx_0 = io_in_uop_bits_ldq_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_in_uop_bits_stq_idx_0 = io_in_uop_bits_stq_idx; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_rxq_idx_0 = io_in_uop_bits_rxq_idx; // @[issue-slot.scala:49:7]
wire [6:0] io_in_uop_bits_pdst_0 = io_in_uop_bits_pdst; // @[issue-slot.scala:49:7]
wire [6:0] io_in_uop_bits_prs1_0 = io_in_uop_bits_prs1; // @[issue-slot.scala:49:7]
wire [6:0] io_in_uop_bits_prs2_0 = io_in_uop_bits_prs2; // @[issue-slot.scala:49:7]
wire [6:0] io_in_uop_bits_prs3_0 = io_in_uop_bits_prs3; // @[issue-slot.scala:49:7]
wire [4:0] io_in_uop_bits_ppred_0 = io_in_uop_bits_ppred; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_prs1_busy_0 = io_in_uop_bits_prs1_busy; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_prs2_busy_0 = io_in_uop_bits_prs2_busy; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_prs3_busy_0 = io_in_uop_bits_prs3_busy; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_ppred_busy_0 = io_in_uop_bits_ppred_busy; // @[issue-slot.scala:49:7]
wire [6:0] io_in_uop_bits_stale_pdst_0 = io_in_uop_bits_stale_pdst; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_exception_0 = io_in_uop_bits_exception; // @[issue-slot.scala:49:7]
wire [63:0] io_in_uop_bits_exc_cause_0 = io_in_uop_bits_exc_cause; // @[issue-slot.scala:49:7]
wire [4:0] io_in_uop_bits_mem_cmd_0 = io_in_uop_bits_mem_cmd; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_mem_size_0 = io_in_uop_bits_mem_size; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_mem_signed_0 = io_in_uop_bits_mem_signed; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_uses_ldq_0 = io_in_uop_bits_uses_ldq; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_uses_stq_0 = io_in_uop_bits_uses_stq; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_unique_0 = io_in_uop_bits_is_unique; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_flush_on_commit_0 = io_in_uop_bits_flush_on_commit; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_csr_cmd_0 = io_in_uop_bits_csr_cmd; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_ldst_is_rs1_0 = io_in_uop_bits_ldst_is_rs1; // @[issue-slot.scala:49:7]
wire [5:0] io_in_uop_bits_ldst_0 = io_in_uop_bits_ldst; // @[issue-slot.scala:49:7]
wire [5:0] io_in_uop_bits_lrs1_0 = io_in_uop_bits_lrs1; // @[issue-slot.scala:49:7]
wire [5:0] io_in_uop_bits_lrs2_0 = io_in_uop_bits_lrs2; // @[issue-slot.scala:49:7]
wire [5:0] io_in_uop_bits_lrs3_0 = io_in_uop_bits_lrs3; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_dst_rtype_0 = io_in_uop_bits_dst_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_lrs1_rtype_0 = io_in_uop_bits_lrs1_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_lrs2_rtype_0 = io_in_uop_bits_lrs2_rtype; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_frs3_en_0 = io_in_uop_bits_frs3_en; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fcn_dw_0 = io_in_uop_bits_fcn_dw; // @[issue-slot.scala:49:7]
wire [4:0] io_in_uop_bits_fcn_op_0 = io_in_uop_bits_fcn_op; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_val_0 = io_in_uop_bits_fp_val; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_fp_rm_0 = io_in_uop_bits_fp_rm; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_fp_typ_0 = io_in_uop_bits_fp_typ; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_xcpt_pf_if_0 = io_in_uop_bits_xcpt_pf_if; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_xcpt_ae_if_0 = io_in_uop_bits_xcpt_ae_if; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_xcpt_ma_if_0 = io_in_uop_bits_xcpt_ma_if; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_bp_debug_if_0 = io_in_uop_bits_bp_debug_if; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_bp_xcpt_if_0 = io_in_uop_bits_bp_xcpt_if; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_debug_fsrc_0 = io_in_uop_bits_debug_fsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_debug_tsrc_0 = io_in_uop_bits_debug_tsrc; // @[issue-slot.scala:49:7]
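  // Branch-update (io_brupdate) inputs aliased into local _0 copies: the b1 resolve/mispredict masks followed by the b2 resolved-uop fields.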
wire [15:0] io_brupdate_b1_resolve_mask_0 = io_brupdate_b1_resolve_mask; // @[issue-slot.scala:49:7]
wire [15:0] io_brupdate_b1_mispredict_mask_0 = io_brupdate_b1_mispredict_mask; // @[issue-slot.scala:49:7]
wire [31:0] io_brupdate_b2_uop_inst_0 = io_brupdate_b2_uop_inst; // @[issue-slot.scala:49:7]
wire [31:0] io_brupdate_b2_uop_debug_inst_0 = io_brupdate_b2_uop_debug_inst; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_rvc_0 = io_brupdate_b2_uop_is_rvc; // @[issue-slot.scala:49:7]
wire [39:0] io_brupdate_b2_uop_debug_pc_0 = io_brupdate_b2_uop_debug_pc; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iq_type_0_0 = io_brupdate_b2_uop_iq_type_0; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iq_type_1_0 = io_brupdate_b2_uop_iq_type_1; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iq_type_2_0 = io_brupdate_b2_uop_iq_type_2; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iq_type_3_0 = io_brupdate_b2_uop_iq_type_3; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_0_0 = io_brupdate_b2_uop_fu_code_0; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_1_0 = io_brupdate_b2_uop_fu_code_1; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_2_0 = io_brupdate_b2_uop_fu_code_2; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_3_0 = io_brupdate_b2_uop_fu_code_3; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_4_0 = io_brupdate_b2_uop_fu_code_4; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_5_0 = io_brupdate_b2_uop_fu_code_5; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_6_0 = io_brupdate_b2_uop_fu_code_6; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_7_0 = io_brupdate_b2_uop_fu_code_7; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_8_0 = io_brupdate_b2_uop_fu_code_8; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_9_0 = io_brupdate_b2_uop_fu_code_9; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iw_issued_0 = io_brupdate_b2_uop_iw_issued; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iw_issued_partial_agen_0 = io_brupdate_b2_uop_iw_issued_partial_agen; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iw_issued_partial_dgen_0 = io_brupdate_b2_uop_iw_issued_partial_dgen; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_iw_p1_speculative_child_0 = io_brupdate_b2_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_iw_p2_speculative_child_0 = io_brupdate_b2_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iw_p1_bypass_hint_0 = io_brupdate_b2_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iw_p2_bypass_hint_0 = io_brupdate_b2_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iw_p3_bypass_hint_0 = io_brupdate_b2_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_dis_col_sel_0 = io_brupdate_b2_uop_dis_col_sel; // @[issue-slot.scala:49:7]
wire [15:0] io_brupdate_b2_uop_br_mask_0 = io_brupdate_b2_uop_br_mask; // @[issue-slot.scala:49:7]
wire [3:0] io_brupdate_b2_uop_br_tag_0 = io_brupdate_b2_uop_br_tag; // @[issue-slot.scala:49:7]
wire [3:0] io_brupdate_b2_uop_br_type_0 = io_brupdate_b2_uop_br_type; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_sfb_0 = io_brupdate_b2_uop_is_sfb; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_fence_0 = io_brupdate_b2_uop_is_fence; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_fencei_0 = io_brupdate_b2_uop_is_fencei; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_sfence_0 = io_brupdate_b2_uop_is_sfence; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_amo_0 = io_brupdate_b2_uop_is_amo; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_eret_0 = io_brupdate_b2_uop_is_eret; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_sys_pc2epc_0 = io_brupdate_b2_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_rocc_0 = io_brupdate_b2_uop_is_rocc; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_mov_0 = io_brupdate_b2_uop_is_mov; // @[issue-slot.scala:49:7]
wire [4:0] io_brupdate_b2_uop_ftq_idx_0 = io_brupdate_b2_uop_ftq_idx; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_edge_inst_0 = io_brupdate_b2_uop_edge_inst; // @[issue-slot.scala:49:7]
wire [5:0] io_brupdate_b2_uop_pc_lob_0 = io_brupdate_b2_uop_pc_lob; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_taken_0 = io_brupdate_b2_uop_taken; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_imm_rename_0 = io_brupdate_b2_uop_imm_rename; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_imm_sel_0 = io_brupdate_b2_uop_imm_sel; // @[issue-slot.scala:49:7]
wire [4:0] io_brupdate_b2_uop_pimm_0 = io_brupdate_b2_uop_pimm; // @[issue-slot.scala:49:7]
wire [19:0] io_brupdate_b2_uop_imm_packed_0 = io_brupdate_b2_uop_imm_packed; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_op1_sel_0 = io_brupdate_b2_uop_op1_sel; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_op2_sel_0 = io_brupdate_b2_uop_op2_sel; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_ldst_0 = io_brupdate_b2_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_wen_0 = io_brupdate_b2_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_ren1_0 = io_brupdate_b2_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_ren2_0 = io_brupdate_b2_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_ren3_0 = io_brupdate_b2_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_swap12_0 = io_brupdate_b2_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_swap23_0 = io_brupdate_b2_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagIn_0 = io_brupdate_b2_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagOut_0 = io_brupdate_b2_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_fromint_0 = io_brupdate_b2_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_toint_0 = io_brupdate_b2_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_fastpipe_0 = io_brupdate_b2_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_fma_0 = io_brupdate_b2_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_div_0 = io_brupdate_b2_uop_fp_ctrl_div; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_sqrt_0 = io_brupdate_b2_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_wflags_0 = io_brupdate_b2_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_vec_0 = io_brupdate_b2_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7]
wire [6:0] io_brupdate_b2_uop_rob_idx_0 = io_brupdate_b2_uop_rob_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_brupdate_b2_uop_ldq_idx_0 = io_brupdate_b2_uop_ldq_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_brupdate_b2_uop_stq_idx_0 = io_brupdate_b2_uop_stq_idx; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_rxq_idx_0 = io_brupdate_b2_uop_rxq_idx; // @[issue-slot.scala:49:7]
wire [6:0] io_brupdate_b2_uop_pdst_0 = io_brupdate_b2_uop_pdst; // @[issue-slot.scala:49:7]
wire [6:0] io_brupdate_b2_uop_prs1_0 = io_brupdate_b2_uop_prs1; // @[issue-slot.scala:49:7]
wire [6:0] io_brupdate_b2_uop_prs2_0 = io_brupdate_b2_uop_prs2; // @[issue-slot.scala:49:7]
wire [6:0] io_brupdate_b2_uop_prs3_0 = io_brupdate_b2_uop_prs3; // @[issue-slot.scala:49:7]
wire [4:0] io_brupdate_b2_uop_ppred_0 = io_brupdate_b2_uop_ppred; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_prs1_busy_0 = io_brupdate_b2_uop_prs1_busy; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_prs2_busy_0 = io_brupdate_b2_uop_prs2_busy; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_prs3_busy_0 = io_brupdate_b2_uop_prs3_busy; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_ppred_busy_0 = io_brupdate_b2_uop_ppred_busy; // @[issue-slot.scala:49:7]
wire [6:0] io_brupdate_b2_uop_stale_pdst_0 = io_brupdate_b2_uop_stale_pdst; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_exception_0 = io_brupdate_b2_uop_exception; // @[issue-slot.scala:49:7]
wire [63:0] io_brupdate_b2_uop_exc_cause_0 = io_brupdate_b2_uop_exc_cause; // @[issue-slot.scala:49:7]
wire [4:0] io_brupdate_b2_uop_mem_cmd_0 = io_brupdate_b2_uop_mem_cmd; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_mem_size_0 = io_brupdate_b2_uop_mem_size; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_mem_signed_0 = io_brupdate_b2_uop_mem_signed; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_uses_ldq_0 = io_brupdate_b2_uop_uses_ldq; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_uses_stq_0 = io_brupdate_b2_uop_uses_stq; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_unique_0 = io_brupdate_b2_uop_is_unique; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_flush_on_commit_0 = io_brupdate_b2_uop_flush_on_commit; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_csr_cmd_0 = io_brupdate_b2_uop_csr_cmd; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_ldst_is_rs1_0 = io_brupdate_b2_uop_ldst_is_rs1; // @[issue-slot.scala:49:7]
wire [5:0] io_brupdate_b2_uop_ldst_0 = io_brupdate_b2_uop_ldst; // @[issue-slot.scala:49:7]
wire [5:0] io_brupdate_b2_uop_lrs1_0 = io_brupdate_b2_uop_lrs1; // @[issue-slot.scala:49:7]
wire [5:0] io_brupdate_b2_uop_lrs2_0 = io_brupdate_b2_uop_lrs2; // @[issue-slot.scala:49:7]
wire [5:0] io_brupdate_b2_uop_lrs3_0 = io_brupdate_b2_uop_lrs3; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_dst_rtype_0 = io_brupdate_b2_uop_dst_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_lrs1_rtype_0 = io_brupdate_b2_uop_lrs1_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_lrs2_rtype_0 = io_brupdate_b2_uop_lrs2_rtype; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_frs3_en_0 = io_brupdate_b2_uop_frs3_en; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fcn_dw_0 = io_brupdate_b2_uop_fcn_dw; // @[issue-slot.scala:49:7]
wire [4:0] io_brupdate_b2_uop_fcn_op_0 = io_brupdate_b2_uop_fcn_op; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_val_0 = io_brupdate_b2_uop_fp_val; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_fp_rm_0 = io_brupdate_b2_uop_fp_rm; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_fp_typ_0 = io_brupdate_b2_uop_fp_typ; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_xcpt_pf_if_0 = io_brupdate_b2_uop_xcpt_pf_if; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_xcpt_ae_if_0 = io_brupdate_b2_uop_xcpt_ae_if; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_xcpt_ma_if_0 = io_brupdate_b2_uop_xcpt_ma_if; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_bp_debug_if_0 = io_brupdate_b2_uop_bp_debug_if; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_bp_xcpt_if_0 = io_brupdate_b2_uop_bp_xcpt_if; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_debug_fsrc_0 = io_brupdate_b2_uop_debug_fsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_debug_tsrc_0 = io_brupdate_b2_uop_debug_tsrc; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_mispredict_0 = io_brupdate_b2_mispredict; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_taken_0 = io_brupdate_b2_taken; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_cfi_type_0 = io_brupdate_b2_cfi_type; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_pc_sel_0 = io_brupdate_b2_pc_sel; // @[issue-slot.scala:49:7]
wire [39:0] io_brupdate_b2_jalr_target_0 = io_brupdate_b2_jalr_target; // @[issue-slot.scala:49:7]
wire [20:0] io_brupdate_b2_target_offset_0 = io_brupdate_b2_target_offset; // @[issue-slot.scala:49:7]
wire io_kill_0 = io_kill; // @[issue-slot.scala:49:7]
wire io_clear_0 = io_clear; // @[issue-slot.scala:49:7]
wire io_squash_grant_0 = io_squash_grant; // @[issue-slot.scala:49:7]
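  // Wakeup-port inputs aliased into local _0 copies; each port carries the waking instruction's uop fields.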
wire io_wakeup_ports_0_valid_0 = io_wakeup_ports_0_valid; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_0_bits_uop_inst_0 = io_wakeup_ports_0_bits_uop_inst; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_0_bits_uop_debug_inst_0 = io_wakeup_ports_0_bits_uop_debug_inst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_rvc_0 = io_wakeup_ports_0_bits_uop_is_rvc; // @[issue-slot.scala:49:7]
wire [39:0] io_wakeup_ports_0_bits_uop_debug_pc_0 = io_wakeup_ports_0_bits_uop_debug_pc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iq_type_0_0 = io_wakeup_ports_0_bits_uop_iq_type_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iq_type_1_0 = io_wakeup_ports_0_bits_uop_iq_type_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iq_type_2_0 = io_wakeup_ports_0_bits_uop_iq_type_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iq_type_3_0 = io_wakeup_ports_0_bits_uop_iq_type_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_0_0 = io_wakeup_ports_0_bits_uop_fu_code_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_1_0 = io_wakeup_ports_0_bits_uop_fu_code_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_2_0 = io_wakeup_ports_0_bits_uop_fu_code_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_3_0 = io_wakeup_ports_0_bits_uop_fu_code_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_4_0 = io_wakeup_ports_0_bits_uop_fu_code_4; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_5_0 = io_wakeup_ports_0_bits_uop_fu_code_5; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_6_0 = io_wakeup_ports_0_bits_uop_fu_code_6; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_7_0 = io_wakeup_ports_0_bits_uop_fu_code_7; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_8_0 = io_wakeup_ports_0_bits_uop_fu_code_8; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_9_0 = io_wakeup_ports_0_bits_uop_fu_code_9; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iw_issued_0 = io_wakeup_ports_0_bits_uop_iw_issued; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iw_issued_partial_agen_0 = io_wakeup_ports_0_bits_uop_iw_issued_partial_agen; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iw_issued_partial_dgen_0 = io_wakeup_ports_0_bits_uop_iw_issued_partial_dgen; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_iw_p1_speculative_child_0 = io_wakeup_ports_0_bits_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_iw_p2_speculative_child_0 = io_wakeup_ports_0_bits_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iw_p1_bypass_hint_0 = io_wakeup_ports_0_bits_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iw_p2_bypass_hint_0 = io_wakeup_ports_0_bits_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iw_p3_bypass_hint_0 = io_wakeup_ports_0_bits_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_dis_col_sel_0 = io_wakeup_ports_0_bits_uop_dis_col_sel; // @[issue-slot.scala:49:7]
wire [15:0] io_wakeup_ports_0_bits_uop_br_mask_0 = io_wakeup_ports_0_bits_uop_br_mask; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_0_bits_uop_br_tag_0 = io_wakeup_ports_0_bits_uop_br_tag; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_0_bits_uop_br_type_0 = io_wakeup_ports_0_bits_uop_br_type; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_sfb_0 = io_wakeup_ports_0_bits_uop_is_sfb; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_fence_0 = io_wakeup_ports_0_bits_uop_is_fence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_fencei_0 = io_wakeup_ports_0_bits_uop_is_fencei; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_sfence_0 = io_wakeup_ports_0_bits_uop_is_sfence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_amo_0 = io_wakeup_ports_0_bits_uop_is_amo; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_eret_0 = io_wakeup_ports_0_bits_uop_is_eret; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_sys_pc2epc_0 = io_wakeup_ports_0_bits_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_rocc_0 = io_wakeup_ports_0_bits_uop_is_rocc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_mov_0 = io_wakeup_ports_0_bits_uop_is_mov; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_0_bits_uop_ftq_idx_0 = io_wakeup_ports_0_bits_uop_ftq_idx; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_edge_inst_0 = io_wakeup_ports_0_bits_uop_edge_inst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_0_bits_uop_pc_lob_0 = io_wakeup_ports_0_bits_uop_pc_lob; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_taken_0 = io_wakeup_ports_0_bits_uop_taken; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_imm_rename_0 = io_wakeup_ports_0_bits_uop_imm_rename; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_imm_sel_0 = io_wakeup_ports_0_bits_uop_imm_sel; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_0_bits_uop_pimm_0 = io_wakeup_ports_0_bits_uop_pimm; // @[issue-slot.scala:49:7]
wire [19:0] io_wakeup_ports_0_bits_uop_imm_packed_0 = io_wakeup_ports_0_bits_uop_imm_packed; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_op1_sel_0 = io_wakeup_ports_0_bits_uop_op1_sel; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_op2_sel_0 = io_wakeup_ports_0_bits_uop_op2_sel; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_ldst_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_wen_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_ren1_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_ren2_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_ren3_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_swap12_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_swap23_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_fp_ctrl_typeTagIn_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_fp_ctrl_typeTagOut_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_fromint_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_toint_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_fastpipe_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_fma_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_div_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_div; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_sqrt_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_wflags_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_vec_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_0_bits_uop_rob_idx_0 = io_wakeup_ports_0_bits_uop_rob_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_0_bits_uop_ldq_idx_0 = io_wakeup_ports_0_bits_uop_ldq_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_0_bits_uop_stq_idx_0 = io_wakeup_ports_0_bits_uop_stq_idx; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_rxq_idx_0 = io_wakeup_ports_0_bits_uop_rxq_idx; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_0_bits_uop_pdst_0 = io_wakeup_ports_0_bits_uop_pdst; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_0_bits_uop_prs1_0 = io_wakeup_ports_0_bits_uop_prs1; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_0_bits_uop_prs2_0 = io_wakeup_ports_0_bits_uop_prs2; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_0_bits_uop_prs3_0 = io_wakeup_ports_0_bits_uop_prs3; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_0_bits_uop_ppred_0 = io_wakeup_ports_0_bits_uop_ppred; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_prs1_busy_0 = io_wakeup_ports_0_bits_uop_prs1_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_prs2_busy_0 = io_wakeup_ports_0_bits_uop_prs2_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_prs3_busy_0 = io_wakeup_ports_0_bits_uop_prs3_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_ppred_busy_0 = io_wakeup_ports_0_bits_uop_ppred_busy; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_0_bits_uop_stale_pdst_0 = io_wakeup_ports_0_bits_uop_stale_pdst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_exception_0 = io_wakeup_ports_0_bits_uop_exception; // @[issue-slot.scala:49:7]
wire [63:0] io_wakeup_ports_0_bits_uop_exc_cause_0 = io_wakeup_ports_0_bits_uop_exc_cause; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_0_bits_uop_mem_cmd_0 = io_wakeup_ports_0_bits_uop_mem_cmd; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_mem_size_0 = io_wakeup_ports_0_bits_uop_mem_size; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_mem_signed_0 = io_wakeup_ports_0_bits_uop_mem_signed; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_uses_ldq_0 = io_wakeup_ports_0_bits_uop_uses_ldq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_uses_stq_0 = io_wakeup_ports_0_bits_uop_uses_stq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_unique_0 = io_wakeup_ports_0_bits_uop_is_unique; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_flush_on_commit_0 = io_wakeup_ports_0_bits_uop_flush_on_commit; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_csr_cmd_0 = io_wakeup_ports_0_bits_uop_csr_cmd; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_ldst_is_rs1_0 = io_wakeup_ports_0_bits_uop_ldst_is_rs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_0_bits_uop_ldst_0 = io_wakeup_ports_0_bits_uop_ldst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_0_bits_uop_lrs1_0 = io_wakeup_ports_0_bits_uop_lrs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_0_bits_uop_lrs2_0 = io_wakeup_ports_0_bits_uop_lrs2; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_0_bits_uop_lrs3_0 = io_wakeup_ports_0_bits_uop_lrs3; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_dst_rtype_0 = io_wakeup_ports_0_bits_uop_dst_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_lrs1_rtype_0 = io_wakeup_ports_0_bits_uop_lrs1_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_lrs2_rtype_0 = io_wakeup_ports_0_bits_uop_lrs2_rtype; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_frs3_en_0 = io_wakeup_ports_0_bits_uop_frs3_en; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fcn_dw_0 = io_wakeup_ports_0_bits_uop_fcn_dw; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_0_bits_uop_fcn_op_0 = io_wakeup_ports_0_bits_uop_fcn_op; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_val_0 = io_wakeup_ports_0_bits_uop_fp_val; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_fp_rm_0 = io_wakeup_ports_0_bits_uop_fp_rm; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_fp_typ_0 = io_wakeup_ports_0_bits_uop_fp_typ; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_xcpt_pf_if_0 = io_wakeup_ports_0_bits_uop_xcpt_pf_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_xcpt_ae_if_0 = io_wakeup_ports_0_bits_uop_xcpt_ae_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_xcpt_ma_if_0 = io_wakeup_ports_0_bits_uop_xcpt_ma_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_bp_debug_if_0 = io_wakeup_ports_0_bits_uop_bp_debug_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_bp_xcpt_if_0 = io_wakeup_ports_0_bits_uop_bp_xcpt_if; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_debug_fsrc_0 = io_wakeup_ports_0_bits_uop_debug_fsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_debug_tsrc_0 = io_wakeup_ports_0_bits_uop_debug_tsrc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_bypassable_0 = io_wakeup_ports_0_bits_bypassable; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_speculative_mask_0 = io_wakeup_ports_0_bits_speculative_mask; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_rebusy_0 = io_wakeup_ports_0_bits_rebusy; // @[issue-slot.scala:49:7]
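  // Remaining wakeup ports repeat this per-port uop aliasing (the exact field set varies slightly by port).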
wire io_wakeup_ports_1_valid_0 = io_wakeup_ports_1_valid; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_1_bits_uop_inst_0 = io_wakeup_ports_1_bits_uop_inst; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_1_bits_uop_debug_inst_0 = io_wakeup_ports_1_bits_uop_debug_inst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_rvc_0 = io_wakeup_ports_1_bits_uop_is_rvc; // @[issue-slot.scala:49:7]
wire [39:0] io_wakeup_ports_1_bits_uop_debug_pc_0 = io_wakeup_ports_1_bits_uop_debug_pc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iq_type_0_0 = io_wakeup_ports_1_bits_uop_iq_type_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iq_type_1_0 = io_wakeup_ports_1_bits_uop_iq_type_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iq_type_2_0 = io_wakeup_ports_1_bits_uop_iq_type_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iq_type_3_0 = io_wakeup_ports_1_bits_uop_iq_type_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_0_0 = io_wakeup_ports_1_bits_uop_fu_code_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_1_0 = io_wakeup_ports_1_bits_uop_fu_code_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_2_0 = io_wakeup_ports_1_bits_uop_fu_code_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_3_0 = io_wakeup_ports_1_bits_uop_fu_code_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_4_0 = io_wakeup_ports_1_bits_uop_fu_code_4; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_5_0 = io_wakeup_ports_1_bits_uop_fu_code_5; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_6_0 = io_wakeup_ports_1_bits_uop_fu_code_6; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_7_0 = io_wakeup_ports_1_bits_uop_fu_code_7; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_8_0 = io_wakeup_ports_1_bits_uop_fu_code_8; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_9_0 = io_wakeup_ports_1_bits_uop_fu_code_9; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iw_issued_0 = io_wakeup_ports_1_bits_uop_iw_issued; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iw_issued_partial_agen_0 = io_wakeup_ports_1_bits_uop_iw_issued_partial_agen; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iw_issued_partial_dgen_0 = io_wakeup_ports_1_bits_uop_iw_issued_partial_dgen; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_iw_p1_speculative_child_0 = io_wakeup_ports_1_bits_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_iw_p2_speculative_child_0 = io_wakeup_ports_1_bits_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iw_p1_bypass_hint_0 = io_wakeup_ports_1_bits_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iw_p2_bypass_hint_0 = io_wakeup_ports_1_bits_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iw_p3_bypass_hint_0 = io_wakeup_ports_1_bits_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_dis_col_sel_0 = io_wakeup_ports_1_bits_uop_dis_col_sel; // @[issue-slot.scala:49:7]
wire [15:0] io_wakeup_ports_1_bits_uop_br_mask_0 = io_wakeup_ports_1_bits_uop_br_mask; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_1_bits_uop_br_tag_0 = io_wakeup_ports_1_bits_uop_br_tag; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_1_bits_uop_br_type_0 = io_wakeup_ports_1_bits_uop_br_type; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_sfb_0 = io_wakeup_ports_1_bits_uop_is_sfb; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_fence_0 = io_wakeup_ports_1_bits_uop_is_fence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_fencei_0 = io_wakeup_ports_1_bits_uop_is_fencei; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_sfence_0 = io_wakeup_ports_1_bits_uop_is_sfence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_amo_0 = io_wakeup_ports_1_bits_uop_is_amo; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_eret_0 = io_wakeup_ports_1_bits_uop_is_eret; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_sys_pc2epc_0 = io_wakeup_ports_1_bits_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_rocc_0 = io_wakeup_ports_1_bits_uop_is_rocc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_mov_0 = io_wakeup_ports_1_bits_uop_is_mov; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_1_bits_uop_ftq_idx_0 = io_wakeup_ports_1_bits_uop_ftq_idx; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_edge_inst_0 = io_wakeup_ports_1_bits_uop_edge_inst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_1_bits_uop_pc_lob_0 = io_wakeup_ports_1_bits_uop_pc_lob; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_taken_0 = io_wakeup_ports_1_bits_uop_taken; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_imm_rename_0 = io_wakeup_ports_1_bits_uop_imm_rename; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_imm_sel_0 = io_wakeup_ports_1_bits_uop_imm_sel; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_1_bits_uop_pimm_0 = io_wakeup_ports_1_bits_uop_pimm; // @[issue-slot.scala:49:7]
wire [19:0] io_wakeup_ports_1_bits_uop_imm_packed_0 = io_wakeup_ports_1_bits_uop_imm_packed; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_op1_sel_0 = io_wakeup_ports_1_bits_uop_op1_sel; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_op2_sel_0 = io_wakeup_ports_1_bits_uop_op2_sel; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_ldst_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_wen_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_ren1_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_ren2_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_ren3_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_swap12_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_swap23_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_fp_ctrl_typeTagIn_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_fp_ctrl_typeTagOut_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_fromint_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_toint_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_fastpipe_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_fma_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_div_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_div; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_sqrt_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_wflags_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_vec_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_1_bits_uop_rob_idx_0 = io_wakeup_ports_1_bits_uop_rob_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_1_bits_uop_ldq_idx_0 = io_wakeup_ports_1_bits_uop_ldq_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_1_bits_uop_stq_idx_0 = io_wakeup_ports_1_bits_uop_stq_idx; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_rxq_idx_0 = io_wakeup_ports_1_bits_uop_rxq_idx; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_1_bits_uop_pdst_0 = io_wakeup_ports_1_bits_uop_pdst; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_1_bits_uop_prs1_0 = io_wakeup_ports_1_bits_uop_prs1; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_1_bits_uop_prs2_0 = io_wakeup_ports_1_bits_uop_prs2; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_1_bits_uop_prs3_0 = io_wakeup_ports_1_bits_uop_prs3; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_1_bits_uop_ppred_0 = io_wakeup_ports_1_bits_uop_ppred; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_prs1_busy_0 = io_wakeup_ports_1_bits_uop_prs1_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_prs2_busy_0 = io_wakeup_ports_1_bits_uop_prs2_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_prs3_busy_0 = io_wakeup_ports_1_bits_uop_prs3_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_ppred_busy_0 = io_wakeup_ports_1_bits_uop_ppred_busy; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_1_bits_uop_stale_pdst_0 = io_wakeup_ports_1_bits_uop_stale_pdst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_exception_0 = io_wakeup_ports_1_bits_uop_exception; // @[issue-slot.scala:49:7]
wire [63:0] io_wakeup_ports_1_bits_uop_exc_cause_0 = io_wakeup_ports_1_bits_uop_exc_cause; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_1_bits_uop_mem_cmd_0 = io_wakeup_ports_1_bits_uop_mem_cmd; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_mem_size_0 = io_wakeup_ports_1_bits_uop_mem_size; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_mem_signed_0 = io_wakeup_ports_1_bits_uop_mem_signed; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_uses_ldq_0 = io_wakeup_ports_1_bits_uop_uses_ldq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_uses_stq_0 = io_wakeup_ports_1_bits_uop_uses_stq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_unique_0 = io_wakeup_ports_1_bits_uop_is_unique; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_flush_on_commit_0 = io_wakeup_ports_1_bits_uop_flush_on_commit; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_csr_cmd_0 = io_wakeup_ports_1_bits_uop_csr_cmd; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_ldst_is_rs1_0 = io_wakeup_ports_1_bits_uop_ldst_is_rs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_1_bits_uop_ldst_0 = io_wakeup_ports_1_bits_uop_ldst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_1_bits_uop_lrs1_0 = io_wakeup_ports_1_bits_uop_lrs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_1_bits_uop_lrs2_0 = io_wakeup_ports_1_bits_uop_lrs2; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_1_bits_uop_lrs3_0 = io_wakeup_ports_1_bits_uop_lrs3; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_dst_rtype_0 = io_wakeup_ports_1_bits_uop_dst_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_lrs1_rtype_0 = io_wakeup_ports_1_bits_uop_lrs1_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_lrs2_rtype_0 = io_wakeup_ports_1_bits_uop_lrs2_rtype; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_frs3_en_0 = io_wakeup_ports_1_bits_uop_frs3_en; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fcn_dw_0 = io_wakeup_ports_1_bits_uop_fcn_dw; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_1_bits_uop_fcn_op_0 = io_wakeup_ports_1_bits_uop_fcn_op; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_val_0 = io_wakeup_ports_1_bits_uop_fp_val; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_fp_rm_0 = io_wakeup_ports_1_bits_uop_fp_rm; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_fp_typ_0 = io_wakeup_ports_1_bits_uop_fp_typ; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_xcpt_pf_if_0 = io_wakeup_ports_1_bits_uop_xcpt_pf_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_xcpt_ae_if_0 = io_wakeup_ports_1_bits_uop_xcpt_ae_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_xcpt_ma_if_0 = io_wakeup_ports_1_bits_uop_xcpt_ma_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_bp_debug_if_0 = io_wakeup_ports_1_bits_uop_bp_debug_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_bp_xcpt_if_0 = io_wakeup_ports_1_bits_uop_bp_xcpt_if; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_debug_fsrc_0 = io_wakeup_ports_1_bits_uop_debug_fsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_debug_tsrc_0 = io_wakeup_ports_1_bits_uop_debug_tsrc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_valid_0 = io_wakeup_ports_2_valid; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_2_bits_uop_inst_0 = io_wakeup_ports_2_bits_uop_inst; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_2_bits_uop_debug_inst_0 = io_wakeup_ports_2_bits_uop_debug_inst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_rvc_0 = io_wakeup_ports_2_bits_uop_is_rvc; // @[issue-slot.scala:49:7]
wire [39:0] io_wakeup_ports_2_bits_uop_debug_pc_0 = io_wakeup_ports_2_bits_uop_debug_pc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iq_type_0_0 = io_wakeup_ports_2_bits_uop_iq_type_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iq_type_1_0 = io_wakeup_ports_2_bits_uop_iq_type_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iq_type_2_0 = io_wakeup_ports_2_bits_uop_iq_type_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iq_type_3_0 = io_wakeup_ports_2_bits_uop_iq_type_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_0_0 = io_wakeup_ports_2_bits_uop_fu_code_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_1_0 = io_wakeup_ports_2_bits_uop_fu_code_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_2_0 = io_wakeup_ports_2_bits_uop_fu_code_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_3_0 = io_wakeup_ports_2_bits_uop_fu_code_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_4_0 = io_wakeup_ports_2_bits_uop_fu_code_4; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_5_0 = io_wakeup_ports_2_bits_uop_fu_code_5; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_6_0 = io_wakeup_ports_2_bits_uop_fu_code_6; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_7_0 = io_wakeup_ports_2_bits_uop_fu_code_7; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_8_0 = io_wakeup_ports_2_bits_uop_fu_code_8; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_9_0 = io_wakeup_ports_2_bits_uop_fu_code_9; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iw_issued_0 = io_wakeup_ports_2_bits_uop_iw_issued; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_iw_p1_speculative_child_0 = io_wakeup_ports_2_bits_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_iw_p2_speculative_child_0 = io_wakeup_ports_2_bits_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iw_p1_bypass_hint_0 = io_wakeup_ports_2_bits_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iw_p2_bypass_hint_0 = io_wakeup_ports_2_bits_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iw_p3_bypass_hint_0 = io_wakeup_ports_2_bits_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_dis_col_sel_0 = io_wakeup_ports_2_bits_uop_dis_col_sel; // @[issue-slot.scala:49:7]
wire [15:0] io_wakeup_ports_2_bits_uop_br_mask_0 = io_wakeup_ports_2_bits_uop_br_mask; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_2_bits_uop_br_tag_0 = io_wakeup_ports_2_bits_uop_br_tag; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_2_bits_uop_br_type_0 = io_wakeup_ports_2_bits_uop_br_type; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_sfb_0 = io_wakeup_ports_2_bits_uop_is_sfb; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_fence_0 = io_wakeup_ports_2_bits_uop_is_fence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_fencei_0 = io_wakeup_ports_2_bits_uop_is_fencei; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_sfence_0 = io_wakeup_ports_2_bits_uop_is_sfence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_amo_0 = io_wakeup_ports_2_bits_uop_is_amo; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_eret_0 = io_wakeup_ports_2_bits_uop_is_eret; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_sys_pc2epc_0 = io_wakeup_ports_2_bits_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_rocc_0 = io_wakeup_ports_2_bits_uop_is_rocc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_mov_0 = io_wakeup_ports_2_bits_uop_is_mov; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_2_bits_uop_ftq_idx_0 = io_wakeup_ports_2_bits_uop_ftq_idx; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_edge_inst_0 = io_wakeup_ports_2_bits_uop_edge_inst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_2_bits_uop_pc_lob_0 = io_wakeup_ports_2_bits_uop_pc_lob; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_taken_0 = io_wakeup_ports_2_bits_uop_taken; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_imm_rename_0 = io_wakeup_ports_2_bits_uop_imm_rename; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_imm_sel_0 = io_wakeup_ports_2_bits_uop_imm_sel; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_2_bits_uop_pimm_0 = io_wakeup_ports_2_bits_uop_pimm; // @[issue-slot.scala:49:7]
wire [19:0] io_wakeup_ports_2_bits_uop_imm_packed_0 = io_wakeup_ports_2_bits_uop_imm_packed; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_op1_sel_0 = io_wakeup_ports_2_bits_uop_op1_sel; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_op2_sel_0 = io_wakeup_ports_2_bits_uop_op2_sel; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_ldst_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_wen_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_ren1_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_ren2_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_ren3_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_swap12_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_swap23_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_fp_ctrl_typeTagIn_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_fp_ctrl_typeTagOut_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_fromint_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_toint_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_fastpipe_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_fma_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_div_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_div; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_sqrt_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_wflags_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_vec_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_2_bits_uop_rob_idx_0 = io_wakeup_ports_2_bits_uop_rob_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_2_bits_uop_ldq_idx_0 = io_wakeup_ports_2_bits_uop_ldq_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_2_bits_uop_stq_idx_0 = io_wakeup_ports_2_bits_uop_stq_idx; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_rxq_idx_0 = io_wakeup_ports_2_bits_uop_rxq_idx; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_2_bits_uop_pdst_0 = io_wakeup_ports_2_bits_uop_pdst; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_2_bits_uop_prs1_0 = io_wakeup_ports_2_bits_uop_prs1; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_2_bits_uop_prs2_0 = io_wakeup_ports_2_bits_uop_prs2; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_2_bits_uop_prs3_0 = io_wakeup_ports_2_bits_uop_prs3; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_2_bits_uop_ppred_0 = io_wakeup_ports_2_bits_uop_ppred; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_prs1_busy_0 = io_wakeup_ports_2_bits_uop_prs1_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_prs2_busy_0 = io_wakeup_ports_2_bits_uop_prs2_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_prs3_busy_0 = io_wakeup_ports_2_bits_uop_prs3_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_ppred_busy_0 = io_wakeup_ports_2_bits_uop_ppred_busy; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_2_bits_uop_stale_pdst_0 = io_wakeup_ports_2_bits_uop_stale_pdst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_exception_0 = io_wakeup_ports_2_bits_uop_exception; // @[issue-slot.scala:49:7]
wire [63:0] io_wakeup_ports_2_bits_uop_exc_cause_0 = io_wakeup_ports_2_bits_uop_exc_cause; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_2_bits_uop_mem_cmd_0 = io_wakeup_ports_2_bits_uop_mem_cmd; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_mem_size_0 = io_wakeup_ports_2_bits_uop_mem_size; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_mem_signed_0 = io_wakeup_ports_2_bits_uop_mem_signed; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_uses_ldq_0 = io_wakeup_ports_2_bits_uop_uses_ldq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_uses_stq_0 = io_wakeup_ports_2_bits_uop_uses_stq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_unique_0 = io_wakeup_ports_2_bits_uop_is_unique; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_flush_on_commit_0 = io_wakeup_ports_2_bits_uop_flush_on_commit; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_csr_cmd_0 = io_wakeup_ports_2_bits_uop_csr_cmd; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_ldst_is_rs1_0 = io_wakeup_ports_2_bits_uop_ldst_is_rs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_2_bits_uop_ldst_0 = io_wakeup_ports_2_bits_uop_ldst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_2_bits_uop_lrs1_0 = io_wakeup_ports_2_bits_uop_lrs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_2_bits_uop_lrs2_0 = io_wakeup_ports_2_bits_uop_lrs2; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_2_bits_uop_lrs3_0 = io_wakeup_ports_2_bits_uop_lrs3; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_dst_rtype_0 = io_wakeup_ports_2_bits_uop_dst_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_lrs1_rtype_0 = io_wakeup_ports_2_bits_uop_lrs1_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_lrs2_rtype_0 = io_wakeup_ports_2_bits_uop_lrs2_rtype; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_frs3_en_0 = io_wakeup_ports_2_bits_uop_frs3_en; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fcn_dw_0 = io_wakeup_ports_2_bits_uop_fcn_dw; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_2_bits_uop_fcn_op_0 = io_wakeup_ports_2_bits_uop_fcn_op; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_val_0 = io_wakeup_ports_2_bits_uop_fp_val; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_fp_rm_0 = io_wakeup_ports_2_bits_uop_fp_rm; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_fp_typ_0 = io_wakeup_ports_2_bits_uop_fp_typ; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_xcpt_pf_if_0 = io_wakeup_ports_2_bits_uop_xcpt_pf_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_xcpt_ae_if_0 = io_wakeup_ports_2_bits_uop_xcpt_ae_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_xcpt_ma_if_0 = io_wakeup_ports_2_bits_uop_xcpt_ma_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_bp_debug_if_0 = io_wakeup_ports_2_bits_uop_bp_debug_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_bp_xcpt_if_0 = io_wakeup_ports_2_bits_uop_bp_xcpt_if; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_debug_fsrc_0 = io_wakeup_ports_2_bits_uop_debug_fsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_debug_tsrc_0 = io_wakeup_ports_2_bits_uop_debug_tsrc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_valid_0 = io_wakeup_ports_3_valid; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_3_bits_uop_inst_0 = io_wakeup_ports_3_bits_uop_inst; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_3_bits_uop_debug_inst_0 = io_wakeup_ports_3_bits_uop_debug_inst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_rvc_0 = io_wakeup_ports_3_bits_uop_is_rvc; // @[issue-slot.scala:49:7]
wire [39:0] io_wakeup_ports_3_bits_uop_debug_pc_0 = io_wakeup_ports_3_bits_uop_debug_pc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iq_type_0_0 = io_wakeup_ports_3_bits_uop_iq_type_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iq_type_1_0 = io_wakeup_ports_3_bits_uop_iq_type_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iq_type_2_0 = io_wakeup_ports_3_bits_uop_iq_type_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iq_type_3_0 = io_wakeup_ports_3_bits_uop_iq_type_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_0_0 = io_wakeup_ports_3_bits_uop_fu_code_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_1_0 = io_wakeup_ports_3_bits_uop_fu_code_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_2_0 = io_wakeup_ports_3_bits_uop_fu_code_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_3_0 = io_wakeup_ports_3_bits_uop_fu_code_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_4_0 = io_wakeup_ports_3_bits_uop_fu_code_4; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_5_0 = io_wakeup_ports_3_bits_uop_fu_code_5; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_6_0 = io_wakeup_ports_3_bits_uop_fu_code_6; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_7_0 = io_wakeup_ports_3_bits_uop_fu_code_7; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_8_0 = io_wakeup_ports_3_bits_uop_fu_code_8; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_9_0 = io_wakeup_ports_3_bits_uop_fu_code_9; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iw_issued_0 = io_wakeup_ports_3_bits_uop_iw_issued; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_iw_p1_speculative_child_0 = io_wakeup_ports_3_bits_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_iw_p2_speculative_child_0 = io_wakeup_ports_3_bits_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iw_p1_bypass_hint_0 = io_wakeup_ports_3_bits_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iw_p2_bypass_hint_0 = io_wakeup_ports_3_bits_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iw_p3_bypass_hint_0 = io_wakeup_ports_3_bits_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_dis_col_sel_0 = io_wakeup_ports_3_bits_uop_dis_col_sel; // @[issue-slot.scala:49:7]
wire [15:0] io_wakeup_ports_3_bits_uop_br_mask_0 = io_wakeup_ports_3_bits_uop_br_mask; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_3_bits_uop_br_tag_0 = io_wakeup_ports_3_bits_uop_br_tag; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_3_bits_uop_br_type_0 = io_wakeup_ports_3_bits_uop_br_type; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_sfb_0 = io_wakeup_ports_3_bits_uop_is_sfb; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_fence_0 = io_wakeup_ports_3_bits_uop_is_fence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_fencei_0 = io_wakeup_ports_3_bits_uop_is_fencei; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_sfence_0 = io_wakeup_ports_3_bits_uop_is_sfence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_amo_0 = io_wakeup_ports_3_bits_uop_is_amo; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_eret_0 = io_wakeup_ports_3_bits_uop_is_eret; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_sys_pc2epc_0 = io_wakeup_ports_3_bits_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_rocc_0 = io_wakeup_ports_3_bits_uop_is_rocc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_mov_0 = io_wakeup_ports_3_bits_uop_is_mov; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_3_bits_uop_ftq_idx_0 = io_wakeup_ports_3_bits_uop_ftq_idx; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_edge_inst_0 = io_wakeup_ports_3_bits_uop_edge_inst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_3_bits_uop_pc_lob_0 = io_wakeup_ports_3_bits_uop_pc_lob; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_taken_0 = io_wakeup_ports_3_bits_uop_taken; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_imm_rename_0 = io_wakeup_ports_3_bits_uop_imm_rename; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_imm_sel_0 = io_wakeup_ports_3_bits_uop_imm_sel; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_3_bits_uop_pimm_0 = io_wakeup_ports_3_bits_uop_pimm; // @[issue-slot.scala:49:7]
wire [19:0] io_wakeup_ports_3_bits_uop_imm_packed_0 = io_wakeup_ports_3_bits_uop_imm_packed; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_op1_sel_0 = io_wakeup_ports_3_bits_uop_op1_sel; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_op2_sel_0 = io_wakeup_ports_3_bits_uop_op2_sel; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_ldst_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_wen_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_ren1_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_ren2_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_ren3_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_swap12_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_swap23_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_fp_ctrl_typeTagIn_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_fp_ctrl_typeTagOut_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_fromint_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_toint_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_fastpipe_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_fma_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_div_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_div; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_sqrt_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_wflags_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_vec_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_3_bits_uop_rob_idx_0 = io_wakeup_ports_3_bits_uop_rob_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_3_bits_uop_ldq_idx_0 = io_wakeup_ports_3_bits_uop_ldq_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_3_bits_uop_stq_idx_0 = io_wakeup_ports_3_bits_uop_stq_idx; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_rxq_idx_0 = io_wakeup_ports_3_bits_uop_rxq_idx; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_3_bits_uop_pdst_0 = io_wakeup_ports_3_bits_uop_pdst; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_3_bits_uop_prs1_0 = io_wakeup_ports_3_bits_uop_prs1; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_3_bits_uop_prs2_0 = io_wakeup_ports_3_bits_uop_prs2; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_3_bits_uop_prs3_0 = io_wakeup_ports_3_bits_uop_prs3; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_3_bits_uop_ppred_0 = io_wakeup_ports_3_bits_uop_ppred; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_prs1_busy_0 = io_wakeup_ports_3_bits_uop_prs1_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_prs2_busy_0 = io_wakeup_ports_3_bits_uop_prs2_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_prs3_busy_0 = io_wakeup_ports_3_bits_uop_prs3_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_ppred_busy_0 = io_wakeup_ports_3_bits_uop_ppred_busy; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_3_bits_uop_stale_pdst_0 = io_wakeup_ports_3_bits_uop_stale_pdst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_exception_0 = io_wakeup_ports_3_bits_uop_exception; // @[issue-slot.scala:49:7]
wire [63:0] io_wakeup_ports_3_bits_uop_exc_cause_0 = io_wakeup_ports_3_bits_uop_exc_cause; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_3_bits_uop_mem_cmd_0 = io_wakeup_ports_3_bits_uop_mem_cmd; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_mem_size_0 = io_wakeup_ports_3_bits_uop_mem_size; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_mem_signed_0 = io_wakeup_ports_3_bits_uop_mem_signed; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_uses_ldq_0 = io_wakeup_ports_3_bits_uop_uses_ldq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_uses_stq_0 = io_wakeup_ports_3_bits_uop_uses_stq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_unique_0 = io_wakeup_ports_3_bits_uop_is_unique; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_flush_on_commit_0 = io_wakeup_ports_3_bits_uop_flush_on_commit; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_csr_cmd_0 = io_wakeup_ports_3_bits_uop_csr_cmd; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_ldst_is_rs1_0 = io_wakeup_ports_3_bits_uop_ldst_is_rs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_3_bits_uop_ldst_0 = io_wakeup_ports_3_bits_uop_ldst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_3_bits_uop_lrs1_0 = io_wakeup_ports_3_bits_uop_lrs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_3_bits_uop_lrs2_0 = io_wakeup_ports_3_bits_uop_lrs2; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_3_bits_uop_lrs3_0 = io_wakeup_ports_3_bits_uop_lrs3; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_dst_rtype_0 = io_wakeup_ports_3_bits_uop_dst_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_lrs1_rtype_0 = io_wakeup_ports_3_bits_uop_lrs1_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_lrs2_rtype_0 = io_wakeup_ports_3_bits_uop_lrs2_rtype; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_frs3_en_0 = io_wakeup_ports_3_bits_uop_frs3_en; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fcn_dw_0 = io_wakeup_ports_3_bits_uop_fcn_dw; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_3_bits_uop_fcn_op_0 = io_wakeup_ports_3_bits_uop_fcn_op; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_val_0 = io_wakeup_ports_3_bits_uop_fp_val; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_fp_rm_0 = io_wakeup_ports_3_bits_uop_fp_rm; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_fp_typ_0 = io_wakeup_ports_3_bits_uop_fp_typ; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_xcpt_pf_if_0 = io_wakeup_ports_3_bits_uop_xcpt_pf_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_xcpt_ae_if_0 = io_wakeup_ports_3_bits_uop_xcpt_ae_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_xcpt_ma_if_0 = io_wakeup_ports_3_bits_uop_xcpt_ma_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_bp_debug_if_0 = io_wakeup_ports_3_bits_uop_bp_debug_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_bp_xcpt_if_0 = io_wakeup_ports_3_bits_uop_bp_xcpt_if; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_debug_fsrc_0 = io_wakeup_ports_3_bits_uop_debug_fsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_debug_tsrc_0 = io_wakeup_ports_3_bits_uop_debug_tsrc; // @[issue-slot.scala:49:7]
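  // Wakeup port 4: internal _0 copies of the incoming micro-op fields.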
wire io_wakeup_ports_4_valid_0 = io_wakeup_ports_4_valid; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_4_bits_uop_inst_0 = io_wakeup_ports_4_bits_uop_inst; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_4_bits_uop_debug_inst_0 = io_wakeup_ports_4_bits_uop_debug_inst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_rvc_0 = io_wakeup_ports_4_bits_uop_is_rvc; // @[issue-slot.scala:49:7]
wire [39:0] io_wakeup_ports_4_bits_uop_debug_pc_0 = io_wakeup_ports_4_bits_uop_debug_pc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_iq_type_0_0 = io_wakeup_ports_4_bits_uop_iq_type_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_iq_type_1_0 = io_wakeup_ports_4_bits_uop_iq_type_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_iq_type_2_0 = io_wakeup_ports_4_bits_uop_iq_type_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_iq_type_3_0 = io_wakeup_ports_4_bits_uop_iq_type_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fu_code_0_0 = io_wakeup_ports_4_bits_uop_fu_code_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fu_code_1_0 = io_wakeup_ports_4_bits_uop_fu_code_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fu_code_2_0 = io_wakeup_ports_4_bits_uop_fu_code_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fu_code_3_0 = io_wakeup_ports_4_bits_uop_fu_code_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fu_code_4_0 = io_wakeup_ports_4_bits_uop_fu_code_4; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fu_code_5_0 = io_wakeup_ports_4_bits_uop_fu_code_5; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fu_code_6_0 = io_wakeup_ports_4_bits_uop_fu_code_6; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fu_code_7_0 = io_wakeup_ports_4_bits_uop_fu_code_7; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fu_code_8_0 = io_wakeup_ports_4_bits_uop_fu_code_8; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fu_code_9_0 = io_wakeup_ports_4_bits_uop_fu_code_9; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_iw_issued_0 = io_wakeup_ports_4_bits_uop_iw_issued; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_4_bits_uop_iw_p1_speculative_child_0 = io_wakeup_ports_4_bits_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_4_bits_uop_iw_p2_speculative_child_0 = io_wakeup_ports_4_bits_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_iw_p1_bypass_hint_0 = io_wakeup_ports_4_bits_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_iw_p2_bypass_hint_0 = io_wakeup_ports_4_bits_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_iw_p3_bypass_hint_0 = io_wakeup_ports_4_bits_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_4_bits_uop_dis_col_sel_0 = io_wakeup_ports_4_bits_uop_dis_col_sel; // @[issue-slot.scala:49:7]
wire [15:0] io_wakeup_ports_4_bits_uop_br_mask_0 = io_wakeup_ports_4_bits_uop_br_mask; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_4_bits_uop_br_tag_0 = io_wakeup_ports_4_bits_uop_br_tag; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_4_bits_uop_br_type_0 = io_wakeup_ports_4_bits_uop_br_type; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_sfb_0 = io_wakeup_ports_4_bits_uop_is_sfb; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_fence_0 = io_wakeup_ports_4_bits_uop_is_fence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_fencei_0 = io_wakeup_ports_4_bits_uop_is_fencei; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_sfence_0 = io_wakeup_ports_4_bits_uop_is_sfence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_amo_0 = io_wakeup_ports_4_bits_uop_is_amo; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_eret_0 = io_wakeup_ports_4_bits_uop_is_eret; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_sys_pc2epc_0 = io_wakeup_ports_4_bits_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_rocc_0 = io_wakeup_ports_4_bits_uop_is_rocc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_mov_0 = io_wakeup_ports_4_bits_uop_is_mov; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_4_bits_uop_ftq_idx_0 = io_wakeup_ports_4_bits_uop_ftq_idx; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_edge_inst_0 = io_wakeup_ports_4_bits_uop_edge_inst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_4_bits_uop_pc_lob_0 = io_wakeup_ports_4_bits_uop_pc_lob; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_taken_0 = io_wakeup_ports_4_bits_uop_taken; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_imm_rename_0 = io_wakeup_ports_4_bits_uop_imm_rename; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_4_bits_uop_imm_sel_0 = io_wakeup_ports_4_bits_uop_imm_sel; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_4_bits_uop_pimm_0 = io_wakeup_ports_4_bits_uop_pimm; // @[issue-slot.scala:49:7]
wire [19:0] io_wakeup_ports_4_bits_uop_imm_packed_0 = io_wakeup_ports_4_bits_uop_imm_packed; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_4_bits_uop_op1_sel_0 = io_wakeup_ports_4_bits_uop_op1_sel; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_4_bits_uop_op2_sel_0 = io_wakeup_ports_4_bits_uop_op2_sel; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_ldst_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_wen_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_ren1_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_ren2_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_ren3_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_swap12_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_swap23_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_4_bits_uop_fp_ctrl_typeTagIn_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_4_bits_uop_fp_ctrl_typeTagOut_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_fromint_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_toint_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_fastpipe_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_fma_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_div_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_div; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_sqrt_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_wflags_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_vec_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_4_bits_uop_rob_idx_0 = io_wakeup_ports_4_bits_uop_rob_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_4_bits_uop_ldq_idx_0 = io_wakeup_ports_4_bits_uop_ldq_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_4_bits_uop_stq_idx_0 = io_wakeup_ports_4_bits_uop_stq_idx; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_4_bits_uop_rxq_idx_0 = io_wakeup_ports_4_bits_uop_rxq_idx; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_4_bits_uop_pdst_0 = io_wakeup_ports_4_bits_uop_pdst; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_4_bits_uop_prs1_0 = io_wakeup_ports_4_bits_uop_prs1; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_4_bits_uop_prs2_0 = io_wakeup_ports_4_bits_uop_prs2; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_4_bits_uop_prs3_0 = io_wakeup_ports_4_bits_uop_prs3; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_4_bits_uop_ppred_0 = io_wakeup_ports_4_bits_uop_ppred; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_prs1_busy_0 = io_wakeup_ports_4_bits_uop_prs1_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_prs2_busy_0 = io_wakeup_ports_4_bits_uop_prs2_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_prs3_busy_0 = io_wakeup_ports_4_bits_uop_prs3_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_ppred_busy_0 = io_wakeup_ports_4_bits_uop_ppred_busy; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_4_bits_uop_stale_pdst_0 = io_wakeup_ports_4_bits_uop_stale_pdst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_exception_0 = io_wakeup_ports_4_bits_uop_exception; // @[issue-slot.scala:49:7]
wire [63:0] io_wakeup_ports_4_bits_uop_exc_cause_0 = io_wakeup_ports_4_bits_uop_exc_cause; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_4_bits_uop_mem_cmd_0 = io_wakeup_ports_4_bits_uop_mem_cmd; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_4_bits_uop_mem_size_0 = io_wakeup_ports_4_bits_uop_mem_size; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_mem_signed_0 = io_wakeup_ports_4_bits_uop_mem_signed; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_uses_ldq_0 = io_wakeup_ports_4_bits_uop_uses_ldq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_uses_stq_0 = io_wakeup_ports_4_bits_uop_uses_stq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_unique_0 = io_wakeup_ports_4_bits_uop_is_unique; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_flush_on_commit_0 = io_wakeup_ports_4_bits_uop_flush_on_commit; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_4_bits_uop_csr_cmd_0 = io_wakeup_ports_4_bits_uop_csr_cmd; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_ldst_is_rs1_0 = io_wakeup_ports_4_bits_uop_ldst_is_rs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_4_bits_uop_ldst_0 = io_wakeup_ports_4_bits_uop_ldst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_4_bits_uop_lrs1_0 = io_wakeup_ports_4_bits_uop_lrs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_4_bits_uop_lrs2_0 = io_wakeup_ports_4_bits_uop_lrs2; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_4_bits_uop_lrs3_0 = io_wakeup_ports_4_bits_uop_lrs3; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_4_bits_uop_dst_rtype_0 = io_wakeup_ports_4_bits_uop_dst_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_4_bits_uop_lrs1_rtype_0 = io_wakeup_ports_4_bits_uop_lrs1_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_4_bits_uop_lrs2_rtype_0 = io_wakeup_ports_4_bits_uop_lrs2_rtype; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_frs3_en_0 = io_wakeup_ports_4_bits_uop_frs3_en; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fcn_dw_0 = io_wakeup_ports_4_bits_uop_fcn_dw; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_4_bits_uop_fcn_op_0 = io_wakeup_ports_4_bits_uop_fcn_op; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_val_0 = io_wakeup_ports_4_bits_uop_fp_val; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_4_bits_uop_fp_rm_0 = io_wakeup_ports_4_bits_uop_fp_rm; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_4_bits_uop_fp_typ_0 = io_wakeup_ports_4_bits_uop_fp_typ; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_xcpt_pf_if_0 = io_wakeup_ports_4_bits_uop_xcpt_pf_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_xcpt_ae_if_0 = io_wakeup_ports_4_bits_uop_xcpt_ae_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_xcpt_ma_if_0 = io_wakeup_ports_4_bits_uop_xcpt_ma_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_bp_debug_if_0 = io_wakeup_ports_4_bits_uop_bp_debug_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_bp_xcpt_if_0 = io_wakeup_ports_4_bits_uop_bp_xcpt_if; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_4_bits_uop_debug_fsrc_0 = io_wakeup_ports_4_bits_uop_debug_fsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_4_bits_uop_debug_tsrc_0 = io_wakeup_ports_4_bits_uop_debug_tsrc; // @[issue-slot.scala:49:7]
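  // Predicate wakeup port and child-rebusy inputs, likewise copied to internal _0 wires.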
wire io_pred_wakeup_port_valid_0 = io_pred_wakeup_port_valid; // @[issue-slot.scala:49:7]
wire [4:0] io_pred_wakeup_port_bits_0 = io_pred_wakeup_port_bits; // @[issue-slot.scala:49:7]
wire [2:0] io_child_rebusys_0 = io_child_rebusys; // @[issue-slot.scala:49:7]
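  // Constant-valued wires: unused partial-issue flags and statically known wakeup attributes
  // (bypassable, rebusy, speculative_mask) are tied off to literals by the generator.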
wire io_iss_uop_iw_issued_partial_agen = 1'h0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iw_issued_partial_dgen = 1'h0; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iw_issued_partial_agen = 1'h0; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iw_issued_partial_dgen = 1'h0; // @[issue-slot.scala:49:7]
wire io_out_uop_iw_issued_partial_agen = 1'h0; // @[issue-slot.scala:49:7]
wire io_out_uop_iw_issued_partial_dgen = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_bypassable = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_rebusy = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iw_issued_partial_agen = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iw_issued_partial_dgen = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_rebusy = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iw_issued_partial_agen = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iw_issued_partial_dgen = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_rebusy = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_iw_issued_partial_agen = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_iw_issued_partial_dgen = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_rebusy = 1'h0; // @[issue-slot.scala:49:7]
wire next_uop_out_iw_issued_partial_agen = 1'h0; // @[util.scala:104:23]
wire next_uop_out_iw_issued_partial_dgen = 1'h0; // @[util.scala:104:23]
wire next_uop_iw_issued_partial_agen = 1'h0; // @[issue-slot.scala:59:28]
wire next_uop_iw_issued_partial_dgen = 1'h0; // @[issue-slot.scala:59:28]
wire prs1_rebusys_1 = 1'h0; // @[issue-slot.scala:102:91]
wire prs1_rebusys_2 = 1'h0; // @[issue-slot.scala:102:91]
wire prs1_rebusys_3 = 1'h0; // @[issue-slot.scala:102:91]
wire prs1_rebusys_4 = 1'h0; // @[issue-slot.scala:102:91]
wire prs2_rebusys_1 = 1'h0; // @[issue-slot.scala:103:91]
wire prs2_rebusys_2 = 1'h0; // @[issue-slot.scala:103:91]
wire prs2_rebusys_3 = 1'h0; // @[issue-slot.scala:103:91]
wire prs2_rebusys_4 = 1'h0; // @[issue-slot.scala:103:91]
wire _next_uop_iw_p1_bypass_hint_T_1 = 1'h0; // @[Mux.scala:30:73]
wire _next_uop_iw_p2_bypass_hint_T_1 = 1'h0; // @[Mux.scala:30:73]
wire _next_uop_iw_p3_bypass_hint_T_1 = 1'h0; // @[Mux.scala:30:73]
wire _iss_ready_T_6 = 1'h0; // @[issue-slot.scala:136:131]
wire agen_ready = 1'h0; // @[issue-slot.scala:137:114]
wire dgen_ready = 1'h0; // @[issue-slot.scala:138:114]
wire [2:0] io_wakeup_ports_1_bits_speculative_mask = 3'h0; // @[issue-slot.scala:49:7]
wire [2:0] _next_uop_iw_p1_speculative_child_T_1 = 3'h0; // @[Mux.scala:30:73]
wire [2:0] _next_uop_iw_p2_speculative_child_T_1 = 3'h0; // @[Mux.scala:30:73]
wire io_wakeup_ports_2_bits_bypassable = 1'h1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_bypassable = 1'h1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_bypassable = 1'h1; // @[issue-slot.scala:49:7]
wire _iss_ready_T_7 = 1'h1; // @[issue-slot.scala:136:110]
wire [2:0] io_wakeup_ports_2_bits_speculative_mask = 3'h1; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_speculative_mask = 3'h2; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_4_bits_speculative_mask = 3'h4; // @[issue-slot.scala:49:7]
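  // Forward declarations for combinational results that are assigned later in the module.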
wire _io_will_be_valid_T_1; // @[issue-slot.scala:65:34]
wire _io_request_T_4; // @[issue-slot.scala:140:51]
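  // next_uop_*: fields of the micro-op that will be captured by this slot on the next cycle
  // (declared at issue-slot.scala:59).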
wire [31:0] next_uop_inst; // @[issue-slot.scala:59:28]
wire [31:0] next_uop_debug_inst; // @[issue-slot.scala:59:28]
wire next_uop_is_rvc; // @[issue-slot.scala:59:28]
wire [39:0] next_uop_debug_pc; // @[issue-slot.scala:59:28]
wire next_uop_iq_type_0; // @[issue-slot.scala:59:28]
wire next_uop_iq_type_1; // @[issue-slot.scala:59:28]
wire next_uop_iq_type_2; // @[issue-slot.scala:59:28]
wire next_uop_iq_type_3; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_0; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_1; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_2; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_3; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_4; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_5; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_6; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_7; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_8; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_9; // @[issue-slot.scala:59:28]
wire next_uop_iw_issued; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_iw_p1_speculative_child; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_iw_p2_speculative_child; // @[issue-slot.scala:59:28]
wire next_uop_iw_p1_bypass_hint; // @[issue-slot.scala:59:28]
wire next_uop_iw_p2_bypass_hint; // @[issue-slot.scala:59:28]
wire next_uop_iw_p3_bypass_hint; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_dis_col_sel; // @[issue-slot.scala:59:28]
wire [15:0] next_uop_br_mask; // @[issue-slot.scala:59:28]
wire [3:0] next_uop_br_tag; // @[issue-slot.scala:59:28]
wire [3:0] next_uop_br_type; // @[issue-slot.scala:59:28]
wire next_uop_is_sfb; // @[issue-slot.scala:59:28]
wire next_uop_is_fence; // @[issue-slot.scala:59:28]
wire next_uop_is_fencei; // @[issue-slot.scala:59:28]
wire next_uop_is_sfence; // @[issue-slot.scala:59:28]
wire next_uop_is_amo; // @[issue-slot.scala:59:28]
wire next_uop_is_eret; // @[issue-slot.scala:59:28]
wire next_uop_is_sys_pc2epc; // @[issue-slot.scala:59:28]
wire next_uop_is_rocc; // @[issue-slot.scala:59:28]
wire next_uop_is_mov; // @[issue-slot.scala:59:28]
wire [4:0] next_uop_ftq_idx; // @[issue-slot.scala:59:28]
wire next_uop_edge_inst; // @[issue-slot.scala:59:28]
wire [5:0] next_uop_pc_lob; // @[issue-slot.scala:59:28]
wire next_uop_taken; // @[issue-slot.scala:59:28]
wire next_uop_imm_rename; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_imm_sel; // @[issue-slot.scala:59:28]
wire [4:0] next_uop_pimm; // @[issue-slot.scala:59:28]
wire [19:0] next_uop_imm_packed; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_op1_sel; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_op2_sel; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_ldst; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_wen; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_ren1; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_ren2; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_ren3; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_swap12; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_swap23; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_fromint; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_toint; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_fma; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_div; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_sqrt; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_wflags; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_vec; // @[issue-slot.scala:59:28]
wire [6:0] next_uop_rob_idx; // @[issue-slot.scala:59:28]
wire [4:0] next_uop_ldq_idx; // @[issue-slot.scala:59:28]
wire [4:0] next_uop_stq_idx; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_rxq_idx; // @[issue-slot.scala:59:28]
wire [6:0] next_uop_pdst; // @[issue-slot.scala:59:28]
wire [6:0] next_uop_prs1; // @[issue-slot.scala:59:28]
wire [6:0] next_uop_prs2; // @[issue-slot.scala:59:28]
wire [6:0] next_uop_prs3; // @[issue-slot.scala:59:28]
wire [4:0] next_uop_ppred; // @[issue-slot.scala:59:28]
wire next_uop_prs1_busy; // @[issue-slot.scala:59:28]
wire next_uop_prs2_busy; // @[issue-slot.scala:59:28]
wire next_uop_prs3_busy; // @[issue-slot.scala:59:28]
wire next_uop_ppred_busy; // @[issue-slot.scala:59:28]
wire [6:0] next_uop_stale_pdst; // @[issue-slot.scala:59:28]
wire next_uop_exception; // @[issue-slot.scala:59:28]
wire [63:0] next_uop_exc_cause; // @[issue-slot.scala:59:28]
wire [4:0] next_uop_mem_cmd; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_mem_size; // @[issue-slot.scala:59:28]
wire next_uop_mem_signed; // @[issue-slot.scala:59:28]
wire next_uop_uses_ldq; // @[issue-slot.scala:59:28]
wire next_uop_uses_stq; // @[issue-slot.scala:59:28]
wire next_uop_is_unique; // @[issue-slot.scala:59:28]
wire next_uop_flush_on_commit; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_csr_cmd; // @[issue-slot.scala:59:28]
wire next_uop_ldst_is_rs1; // @[issue-slot.scala:59:28]
wire [5:0] next_uop_ldst; // @[issue-slot.scala:59:28]
wire [5:0] next_uop_lrs1; // @[issue-slot.scala:59:28]
wire [5:0] next_uop_lrs2; // @[issue-slot.scala:59:28]
wire [5:0] next_uop_lrs3; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_dst_rtype; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_lrs1_rtype; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_lrs2_rtype; // @[issue-slot.scala:59:28]
wire next_uop_frs3_en; // @[issue-slot.scala:59:28]
wire next_uop_fcn_dw; // @[issue-slot.scala:59:28]
wire [4:0] next_uop_fcn_op; // @[issue-slot.scala:59:28]
wire next_uop_fp_val; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_fp_rm; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_fp_typ; // @[issue-slot.scala:59:28]
wire next_uop_xcpt_pf_if; // @[issue-slot.scala:59:28]
wire next_uop_xcpt_ae_if; // @[issue-slot.scala:59:28]
wire next_uop_xcpt_ma_if; // @[issue-slot.scala:59:28]
wire next_uop_bp_debug_if; // @[issue-slot.scala:59:28]
wire next_uop_bp_xcpt_if; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_debug_fsrc; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_debug_tsrc; // @[issue-slot.scala:59:28]
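  // io_iss_uop_* output wires; driven below from the slot_uop_* registers.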
wire io_iss_uop_iq_type_0_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iq_type_1_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iq_type_2_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iq_type_3_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_0_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_1_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_2_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_3_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_4_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_5_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_6_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_7_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_8_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_9_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_ldst_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_wen_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_ren1_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_ren2_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_ren3_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_swap12_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_swap23_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_fp_ctrl_typeTagIn_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_fp_ctrl_typeTagOut_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_fromint_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_toint_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_fastpipe_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_fma_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_div_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_sqrt_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_wflags_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_vec_0; // @[issue-slot.scala:49:7]
wire [31:0] io_iss_uop_inst_0; // @[issue-slot.scala:49:7]
wire [31:0] io_iss_uop_debug_inst_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_rvc_0; // @[issue-slot.scala:49:7]
wire [39:0] io_iss_uop_debug_pc_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iw_issued_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_iw_p1_speculative_child_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_iw_p2_speculative_child_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iw_p1_bypass_hint_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iw_p2_bypass_hint_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iw_p3_bypass_hint_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_dis_col_sel_0; // @[issue-slot.scala:49:7]
wire [15:0] io_iss_uop_br_mask_0; // @[issue-slot.scala:49:7]
wire [3:0] io_iss_uop_br_tag_0; // @[issue-slot.scala:49:7]
wire [3:0] io_iss_uop_br_type_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_sfb_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_fence_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_fencei_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_sfence_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_amo_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_eret_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_sys_pc2epc_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_rocc_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_mov_0; // @[issue-slot.scala:49:7]
wire [4:0] io_iss_uop_ftq_idx_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_edge_inst_0; // @[issue-slot.scala:49:7]
wire [5:0] io_iss_uop_pc_lob_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_taken_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_imm_rename_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_imm_sel_0; // @[issue-slot.scala:49:7]
wire [4:0] io_iss_uop_pimm_0; // @[issue-slot.scala:49:7]
wire [19:0] io_iss_uop_imm_packed_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_op1_sel_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_op2_sel_0; // @[issue-slot.scala:49:7]
wire [6:0] io_iss_uop_rob_idx_0; // @[issue-slot.scala:49:7]
wire [4:0] io_iss_uop_ldq_idx_0; // @[issue-slot.scala:49:7]
wire [4:0] io_iss_uop_stq_idx_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_rxq_idx_0; // @[issue-slot.scala:49:7]
wire [6:0] io_iss_uop_pdst_0; // @[issue-slot.scala:49:7]
wire [6:0] io_iss_uop_prs1_0; // @[issue-slot.scala:49:7]
wire [6:0] io_iss_uop_prs2_0; // @[issue-slot.scala:49:7]
wire [6:0] io_iss_uop_prs3_0; // @[issue-slot.scala:49:7]
wire [4:0] io_iss_uop_ppred_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_prs1_busy_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_prs2_busy_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_prs3_busy_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_ppred_busy_0; // @[issue-slot.scala:49:7]
wire [6:0] io_iss_uop_stale_pdst_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_exception_0; // @[issue-slot.scala:49:7]
wire [63:0] io_iss_uop_exc_cause_0; // @[issue-slot.scala:49:7]
wire [4:0] io_iss_uop_mem_cmd_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_mem_size_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_mem_signed_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_uses_ldq_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_uses_stq_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_unique_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_flush_on_commit_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_csr_cmd_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_ldst_is_rs1_0; // @[issue-slot.scala:49:7]
wire [5:0] io_iss_uop_ldst_0; // @[issue-slot.scala:49:7]
wire [5:0] io_iss_uop_lrs1_0; // @[issue-slot.scala:49:7]
wire [5:0] io_iss_uop_lrs2_0; // @[issue-slot.scala:49:7]
wire [5:0] io_iss_uop_lrs3_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_dst_rtype_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_lrs1_rtype_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_lrs2_rtype_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_frs3_en_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fcn_dw_0; // @[issue-slot.scala:49:7]
wire [4:0] io_iss_uop_fcn_op_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_val_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_fp_rm_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_fp_typ_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_xcpt_pf_if_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_xcpt_ae_if_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_xcpt_ma_if_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_bp_debug_if_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_bp_xcpt_if_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_debug_fsrc_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_debug_tsrc_0; // @[issue-slot.scala:49:7]
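  // io_out_uop_* output wires: the micro-op driven out of the slot, used when the issue queue
  // moves entries between slots.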
wire io_out_uop_iq_type_0_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iq_type_1_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iq_type_2_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iq_type_3_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_0_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_1_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_2_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_3_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_4_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_5_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_6_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_7_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_8_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_9_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_ldst_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_wen_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_ren1_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_ren2_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_ren3_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_swap12_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_swap23_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_fp_ctrl_typeTagIn_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_fp_ctrl_typeTagOut_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_fromint_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_toint_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_fastpipe_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_fma_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_div_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_sqrt_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_wflags_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_vec_0; // @[issue-slot.scala:49:7]
wire [31:0] io_out_uop_inst_0; // @[issue-slot.scala:49:7]
wire [31:0] io_out_uop_debug_inst_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_rvc_0; // @[issue-slot.scala:49:7]
wire [39:0] io_out_uop_debug_pc_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iw_issued_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_iw_p1_speculative_child_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_iw_p2_speculative_child_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iw_p1_bypass_hint_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iw_p2_bypass_hint_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iw_p3_bypass_hint_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_dis_col_sel_0; // @[issue-slot.scala:49:7]
wire [15:0] io_out_uop_br_mask_0; // @[issue-slot.scala:49:7]
wire [3:0] io_out_uop_br_tag_0; // @[issue-slot.scala:49:7]
wire [3:0] io_out_uop_br_type_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_sfb_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_fence_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_fencei_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_sfence_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_amo_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_eret_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_sys_pc2epc_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_rocc_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_mov_0; // @[issue-slot.scala:49:7]
wire [4:0] io_out_uop_ftq_idx_0; // @[issue-slot.scala:49:7]
wire io_out_uop_edge_inst_0; // @[issue-slot.scala:49:7]
wire [5:0] io_out_uop_pc_lob_0; // @[issue-slot.scala:49:7]
wire io_out_uop_taken_0; // @[issue-slot.scala:49:7]
wire io_out_uop_imm_rename_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_imm_sel_0; // @[issue-slot.scala:49:7]
wire [4:0] io_out_uop_pimm_0; // @[issue-slot.scala:49:7]
wire [19:0] io_out_uop_imm_packed_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_op1_sel_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_op2_sel_0; // @[issue-slot.scala:49:7]
wire [6:0] io_out_uop_rob_idx_0; // @[issue-slot.scala:49:7]
wire [4:0] io_out_uop_ldq_idx_0; // @[issue-slot.scala:49:7]
wire [4:0] io_out_uop_stq_idx_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_rxq_idx_0; // @[issue-slot.scala:49:7]
wire [6:0] io_out_uop_pdst_0; // @[issue-slot.scala:49:7]
wire [6:0] io_out_uop_prs1_0; // @[issue-slot.scala:49:7]
wire [6:0] io_out_uop_prs2_0; // @[issue-slot.scala:49:7]
wire [6:0] io_out_uop_prs3_0; // @[issue-slot.scala:49:7]
wire [4:0] io_out_uop_ppred_0; // @[issue-slot.scala:49:7]
wire io_out_uop_prs1_busy_0; // @[issue-slot.scala:49:7]
wire io_out_uop_prs2_busy_0; // @[issue-slot.scala:49:7]
wire io_out_uop_prs3_busy_0; // @[issue-slot.scala:49:7]
wire io_out_uop_ppred_busy_0; // @[issue-slot.scala:49:7]
wire [6:0] io_out_uop_stale_pdst_0; // @[issue-slot.scala:49:7]
wire io_out_uop_exception_0; // @[issue-slot.scala:49:7]
wire [63:0] io_out_uop_exc_cause_0; // @[issue-slot.scala:49:7]
wire [4:0] io_out_uop_mem_cmd_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_mem_size_0; // @[issue-slot.scala:49:7]
wire io_out_uop_mem_signed_0; // @[issue-slot.scala:49:7]
wire io_out_uop_uses_ldq_0; // @[issue-slot.scala:49:7]
wire io_out_uop_uses_stq_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_unique_0; // @[issue-slot.scala:49:7]
wire io_out_uop_flush_on_commit_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_csr_cmd_0; // @[issue-slot.scala:49:7]
wire io_out_uop_ldst_is_rs1_0; // @[issue-slot.scala:49:7]
wire [5:0] io_out_uop_ldst_0; // @[issue-slot.scala:49:7]
wire [5:0] io_out_uop_lrs1_0; // @[issue-slot.scala:49:7]
wire [5:0] io_out_uop_lrs2_0; // @[issue-slot.scala:49:7]
wire [5:0] io_out_uop_lrs3_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_dst_rtype_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_lrs1_rtype_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_lrs2_rtype_0; // @[issue-slot.scala:49:7]
wire io_out_uop_frs3_en_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fcn_dw_0; // @[issue-slot.scala:49:7]
wire [4:0] io_out_uop_fcn_op_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_val_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_fp_rm_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_fp_typ_0; // @[issue-slot.scala:49:7]
wire io_out_uop_xcpt_pf_if_0; // @[issue-slot.scala:49:7]
wire io_out_uop_xcpt_ae_if_0; // @[issue-slot.scala:49:7]
wire io_out_uop_xcpt_ma_if_0; // @[issue-slot.scala:49:7]
wire io_out_uop_bp_debug_if_0; // @[issue-slot.scala:49:7]
wire io_out_uop_bp_xcpt_if_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_debug_fsrc_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_debug_tsrc_0; // @[issue-slot.scala:49:7]
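  // Top-level status outputs: current validity, next-cycle validity, and the issue request.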
wire io_valid_0; // @[issue-slot.scala:49:7]
wire io_will_be_valid_0; // @[issue-slot.scala:49:7]
wire io_request_0; // @[issue-slot.scala:49:7]
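  // slot_valid: whether this issue slot currently holds a valid micro-op.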
reg slot_valid; // @[issue-slot.scala:55:27]
assign io_valid_0 = slot_valid; // @[issue-slot.scala:49:7, :55:27]
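  // slot_uop_* registers hold the resident micro-op; each drives its io_iss_uop_* output
  // and a next_uop_out_* shadow wire.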
reg [31:0] slot_uop_inst; // @[issue-slot.scala:56:21]
assign io_iss_uop_inst_0 = slot_uop_inst; // @[issue-slot.scala:49:7, :56:21]
wire [31:0] next_uop_out_inst = slot_uop_inst; // @[util.scala:104:23]
reg [31:0] slot_uop_debug_inst; // @[issue-slot.scala:56:21]
assign io_iss_uop_debug_inst_0 = slot_uop_debug_inst; // @[issue-slot.scala:49:7, :56:21]
wire [31:0] next_uop_out_debug_inst = slot_uop_debug_inst; // @[util.scala:104:23]
reg slot_uop_is_rvc; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_rvc_0 = slot_uop_is_rvc; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_rvc = slot_uop_is_rvc; // @[util.scala:104:23]
reg [39:0] slot_uop_debug_pc; // @[issue-slot.scala:56:21]
assign io_iss_uop_debug_pc_0 = slot_uop_debug_pc; // @[issue-slot.scala:49:7, :56:21]
wire [39:0] next_uop_out_debug_pc = slot_uop_debug_pc; // @[util.scala:104:23]
reg slot_uop_iq_type_0; // @[issue-slot.scala:56:21]
assign io_iss_uop_iq_type_0_0 = slot_uop_iq_type_0; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iq_type_0 = slot_uop_iq_type_0; // @[util.scala:104:23]
reg slot_uop_iq_type_1; // @[issue-slot.scala:56:21]
assign io_iss_uop_iq_type_1_0 = slot_uop_iq_type_1; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iq_type_1 = slot_uop_iq_type_1; // @[util.scala:104:23]
reg slot_uop_iq_type_2; // @[issue-slot.scala:56:21]
assign io_iss_uop_iq_type_2_0 = slot_uop_iq_type_2; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iq_type_2 = slot_uop_iq_type_2; // @[util.scala:104:23]
reg slot_uop_iq_type_3; // @[issue-slot.scala:56:21]
assign io_iss_uop_iq_type_3_0 = slot_uop_iq_type_3; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iq_type_3 = slot_uop_iq_type_3; // @[util.scala:104:23]
reg slot_uop_fu_code_0; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_0_0 = slot_uop_fu_code_0; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_0 = slot_uop_fu_code_0; // @[util.scala:104:23]
reg slot_uop_fu_code_1; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_1_0 = slot_uop_fu_code_1; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_1 = slot_uop_fu_code_1; // @[util.scala:104:23]
reg slot_uop_fu_code_2; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_2_0 = slot_uop_fu_code_2; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_2 = slot_uop_fu_code_2; // @[util.scala:104:23]
reg slot_uop_fu_code_3; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_3_0 = slot_uop_fu_code_3; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_3 = slot_uop_fu_code_3; // @[util.scala:104:23]
reg slot_uop_fu_code_4; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_4_0 = slot_uop_fu_code_4; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_4 = slot_uop_fu_code_4; // @[util.scala:104:23]
reg slot_uop_fu_code_5; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_5_0 = slot_uop_fu_code_5; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_5 = slot_uop_fu_code_5; // @[util.scala:104:23]
reg slot_uop_fu_code_6; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_6_0 = slot_uop_fu_code_6; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_6 = slot_uop_fu_code_6; // @[util.scala:104:23]
reg slot_uop_fu_code_7; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_7_0 = slot_uop_fu_code_7; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_7 = slot_uop_fu_code_7; // @[util.scala:104:23]
reg slot_uop_fu_code_8; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_8_0 = slot_uop_fu_code_8; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_8 = slot_uop_fu_code_8; // @[util.scala:104:23]
reg slot_uop_fu_code_9; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_9_0 = slot_uop_fu_code_9; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_9 = slot_uop_fu_code_9; // @[util.scala:104:23]
reg slot_uop_iw_issued; // @[issue-slot.scala:56:21]
assign io_iss_uop_iw_issued_0 = slot_uop_iw_issued; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iw_issued = slot_uop_iw_issued; // @[util.scala:104:23]
reg [2:0] slot_uop_iw_p1_speculative_child; // @[issue-slot.scala:56:21]
assign io_iss_uop_iw_p1_speculative_child_0 = slot_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_iw_p1_speculative_child = slot_uop_iw_p1_speculative_child; // @[util.scala:104:23]
reg [2:0] slot_uop_iw_p2_speculative_child; // @[issue-slot.scala:56:21]
assign io_iss_uop_iw_p2_speculative_child_0 = slot_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_iw_p2_speculative_child = slot_uop_iw_p2_speculative_child; // @[util.scala:104:23]
reg slot_uop_iw_p1_bypass_hint; // @[issue-slot.scala:56:21]
assign io_iss_uop_iw_p1_bypass_hint_0 = slot_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iw_p1_bypass_hint = slot_uop_iw_p1_bypass_hint; // @[util.scala:104:23]
reg slot_uop_iw_p2_bypass_hint; // @[issue-slot.scala:56:21]
assign io_iss_uop_iw_p2_bypass_hint_0 = slot_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iw_p2_bypass_hint = slot_uop_iw_p2_bypass_hint; // @[util.scala:104:23]
reg slot_uop_iw_p3_bypass_hint; // @[issue-slot.scala:56:21]
assign io_iss_uop_iw_p3_bypass_hint_0 = slot_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iw_p3_bypass_hint = slot_uop_iw_p3_bypass_hint; // @[util.scala:104:23]
reg [2:0] slot_uop_dis_col_sel; // @[issue-slot.scala:56:21]
assign io_iss_uop_dis_col_sel_0 = slot_uop_dis_col_sel; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_dis_col_sel = slot_uop_dis_col_sel; // @[util.scala:104:23]
reg [15:0] slot_uop_br_mask; // @[issue-slot.scala:56:21]
assign io_iss_uop_br_mask_0 = slot_uop_br_mask; // @[issue-slot.scala:49:7, :56:21]
reg [3:0] slot_uop_br_tag; // @[issue-slot.scala:56:21]
assign io_iss_uop_br_tag_0 = slot_uop_br_tag; // @[issue-slot.scala:49:7, :56:21]
wire [3:0] next_uop_out_br_tag = slot_uop_br_tag; // @[util.scala:104:23]
reg [3:0] slot_uop_br_type; // @[issue-slot.scala:56:21]
assign io_iss_uop_br_type_0 = slot_uop_br_type; // @[issue-slot.scala:49:7, :56:21]
wire [3:0] next_uop_out_br_type = slot_uop_br_type; // @[util.scala:104:23]
reg slot_uop_is_sfb; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_sfb_0 = slot_uop_is_sfb; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_sfb = slot_uop_is_sfb; // @[util.scala:104:23]
reg slot_uop_is_fence; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_fence_0 = slot_uop_is_fence; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_fence = slot_uop_is_fence; // @[util.scala:104:23]
reg slot_uop_is_fencei; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_fencei_0 = slot_uop_is_fencei; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_fencei = slot_uop_is_fencei; // @[util.scala:104:23]
reg slot_uop_is_sfence; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_sfence_0 = slot_uop_is_sfence; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_sfence = slot_uop_is_sfence; // @[util.scala:104:23]
reg slot_uop_is_amo; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_amo_0 = slot_uop_is_amo; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_amo = slot_uop_is_amo; // @[util.scala:104:23]
reg slot_uop_is_eret; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_eret_0 = slot_uop_is_eret; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_eret = slot_uop_is_eret; // @[util.scala:104:23]
reg slot_uop_is_sys_pc2epc; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_sys_pc2epc_0 = slot_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_sys_pc2epc = slot_uop_is_sys_pc2epc; // @[util.scala:104:23]
reg slot_uop_is_rocc; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_rocc_0 = slot_uop_is_rocc; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_rocc = slot_uop_is_rocc; // @[util.scala:104:23]
reg slot_uop_is_mov; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_mov_0 = slot_uop_is_mov; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_mov = slot_uop_is_mov; // @[util.scala:104:23]
reg [4:0] slot_uop_ftq_idx; // @[issue-slot.scala:56:21]
assign io_iss_uop_ftq_idx_0 = slot_uop_ftq_idx; // @[issue-slot.scala:49:7, :56:21]
wire [4:0] next_uop_out_ftq_idx = slot_uop_ftq_idx; // @[util.scala:104:23]
reg slot_uop_edge_inst; // @[issue-slot.scala:56:21]
assign io_iss_uop_edge_inst_0 = slot_uop_edge_inst; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_edge_inst = slot_uop_edge_inst; // @[util.scala:104:23]
reg [5:0] slot_uop_pc_lob; // @[issue-slot.scala:56:21]
assign io_iss_uop_pc_lob_0 = slot_uop_pc_lob; // @[issue-slot.scala:49:7, :56:21]
wire [5:0] next_uop_out_pc_lob = slot_uop_pc_lob; // @[util.scala:104:23]
reg slot_uop_taken; // @[issue-slot.scala:56:21]
assign io_iss_uop_taken_0 = slot_uop_taken; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_taken = slot_uop_taken; // @[util.scala:104:23]
reg slot_uop_imm_rename; // @[issue-slot.scala:56:21]
assign io_iss_uop_imm_rename_0 = slot_uop_imm_rename; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_imm_rename = slot_uop_imm_rename; // @[util.scala:104:23]
reg [2:0] slot_uop_imm_sel; // @[issue-slot.scala:56:21]
assign io_iss_uop_imm_sel_0 = slot_uop_imm_sel; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_imm_sel = slot_uop_imm_sel; // @[util.scala:104:23]
reg [4:0] slot_uop_pimm; // @[issue-slot.scala:56:21]
assign io_iss_uop_pimm_0 = slot_uop_pimm; // @[issue-slot.scala:49:7, :56:21]
wire [4:0] next_uop_out_pimm = slot_uop_pimm; // @[util.scala:104:23]
reg [19:0] slot_uop_imm_packed; // @[issue-slot.scala:56:21]
assign io_iss_uop_imm_packed_0 = slot_uop_imm_packed; // @[issue-slot.scala:49:7, :56:21]
wire [19:0] next_uop_out_imm_packed = slot_uop_imm_packed; // @[util.scala:104:23]
reg [1:0] slot_uop_op1_sel; // @[issue-slot.scala:56:21]
assign io_iss_uop_op1_sel_0 = slot_uop_op1_sel; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_op1_sel = slot_uop_op1_sel; // @[util.scala:104:23]
reg [2:0] slot_uop_op2_sel; // @[issue-slot.scala:56:21]
assign io_iss_uop_op2_sel_0 = slot_uop_op2_sel; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_op2_sel = slot_uop_op2_sel; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_ldst; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_ldst_0 = slot_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_ldst = slot_uop_fp_ctrl_ldst; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_wen; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_wen_0 = slot_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_wen = slot_uop_fp_ctrl_wen; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_ren1; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_ren1_0 = slot_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_ren1 = slot_uop_fp_ctrl_ren1; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_ren2; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_ren2_0 = slot_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_ren2 = slot_uop_fp_ctrl_ren2; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_ren3; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_ren3_0 = slot_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_ren3 = slot_uop_fp_ctrl_ren3; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_swap12; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_swap12_0 = slot_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_swap12 = slot_uop_fp_ctrl_swap12; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_swap23; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_swap23_0 = slot_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_swap23 = slot_uop_fp_ctrl_swap23; // @[util.scala:104:23]
reg [1:0] slot_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_typeTagIn_0 = slot_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_fp_ctrl_typeTagIn = slot_uop_fp_ctrl_typeTagIn; // @[util.scala:104:23]
reg [1:0] slot_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_typeTagOut_0 = slot_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_fp_ctrl_typeTagOut = slot_uop_fp_ctrl_typeTagOut; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_fromint; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_fromint_0 = slot_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_fromint = slot_uop_fp_ctrl_fromint; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_toint; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_toint_0 = slot_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_toint = slot_uop_fp_ctrl_toint; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_fastpipe_0 = slot_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_fastpipe = slot_uop_fp_ctrl_fastpipe; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_fma; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_fma_0 = slot_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_fma = slot_uop_fp_ctrl_fma; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_div; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_div_0 = slot_uop_fp_ctrl_div; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_div = slot_uop_fp_ctrl_div; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_sqrt; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_sqrt_0 = slot_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_sqrt = slot_uop_fp_ctrl_sqrt; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_wflags; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_wflags_0 = slot_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_wflags = slot_uop_fp_ctrl_wflags; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_vec; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_vec_0 = slot_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_vec = slot_uop_fp_ctrl_vec; // @[util.scala:104:23]
reg [6:0] slot_uop_rob_idx; // @[issue-slot.scala:56:21]
assign io_iss_uop_rob_idx_0 = slot_uop_rob_idx; // @[issue-slot.scala:49:7, :56:21]
wire [6:0] next_uop_out_rob_idx = slot_uop_rob_idx; // @[util.scala:104:23]
reg [4:0] slot_uop_ldq_idx; // @[issue-slot.scala:56:21]
assign io_iss_uop_ldq_idx_0 = slot_uop_ldq_idx; // @[issue-slot.scala:49:7, :56:21]
wire [4:0] next_uop_out_ldq_idx = slot_uop_ldq_idx; // @[util.scala:104:23]
reg [4:0] slot_uop_stq_idx; // @[issue-slot.scala:56:21]
assign io_iss_uop_stq_idx_0 = slot_uop_stq_idx; // @[issue-slot.scala:49:7, :56:21]
wire [4:0] next_uop_out_stq_idx = slot_uop_stq_idx; // @[util.scala:104:23]
reg [1:0] slot_uop_rxq_idx; // @[issue-slot.scala:56:21]
assign io_iss_uop_rxq_idx_0 = slot_uop_rxq_idx; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_rxq_idx = slot_uop_rxq_idx; // @[util.scala:104:23]
reg [6:0] slot_uop_pdst; // @[issue-slot.scala:56:21]
assign io_iss_uop_pdst_0 = slot_uop_pdst; // @[issue-slot.scala:49:7, :56:21]
wire [6:0] next_uop_out_pdst = slot_uop_pdst; // @[util.scala:104:23]
reg [6:0] slot_uop_prs1; // @[issue-slot.scala:56:21]
assign io_iss_uop_prs1_0 = slot_uop_prs1; // @[issue-slot.scala:49:7, :56:21]
wire [6:0] next_uop_out_prs1 = slot_uop_prs1; // @[util.scala:104:23]
reg [6:0] slot_uop_prs2; // @[issue-slot.scala:56:21]
assign io_iss_uop_prs2_0 = slot_uop_prs2; // @[issue-slot.scala:49:7, :56:21]
wire [6:0] next_uop_out_prs2 = slot_uop_prs2; // @[util.scala:104:23]
reg [6:0] slot_uop_prs3; // @[issue-slot.scala:56:21]
assign io_iss_uop_prs3_0 = slot_uop_prs3; // @[issue-slot.scala:49:7, :56:21]
wire [6:0] next_uop_out_prs3 = slot_uop_prs3; // @[util.scala:104:23]
reg [4:0] slot_uop_ppred; // @[issue-slot.scala:56:21]
assign io_iss_uop_ppred_0 = slot_uop_ppred; // @[issue-slot.scala:49:7, :56:21]
wire [4:0] next_uop_out_ppred = slot_uop_ppred; // @[util.scala:104:23]
reg slot_uop_prs1_busy; // @[issue-slot.scala:56:21]
assign io_iss_uop_prs1_busy_0 = slot_uop_prs1_busy; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_prs1_busy = slot_uop_prs1_busy; // @[util.scala:104:23]
reg slot_uop_prs2_busy; // @[issue-slot.scala:56:21]
assign io_iss_uop_prs2_busy_0 = slot_uop_prs2_busy; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_prs2_busy = slot_uop_prs2_busy; // @[util.scala:104:23]
reg slot_uop_prs3_busy; // @[issue-slot.scala:56:21]
assign io_iss_uop_prs3_busy_0 = slot_uop_prs3_busy; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_prs3_busy = slot_uop_prs3_busy; // @[util.scala:104:23]
reg slot_uop_ppred_busy; // @[issue-slot.scala:56:21]
assign io_iss_uop_ppred_busy_0 = slot_uop_ppred_busy; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_ppred_busy = slot_uop_ppred_busy; // @[util.scala:104:23]
wire _iss_ready_T_3 = slot_uop_ppred_busy; // @[issue-slot.scala:56:21, :136:88]
wire _agen_ready_T_2 = slot_uop_ppred_busy; // @[issue-slot.scala:56:21, :137:95]
wire _dgen_ready_T_2 = slot_uop_ppred_busy; // @[issue-slot.scala:56:21, :138:95]
reg [6:0] slot_uop_stale_pdst; // @[issue-slot.scala:56:21]
assign io_iss_uop_stale_pdst_0 = slot_uop_stale_pdst; // @[issue-slot.scala:49:7, :56:21]
wire [6:0] next_uop_out_stale_pdst = slot_uop_stale_pdst; // @[util.scala:104:23]
reg slot_uop_exception; // @[issue-slot.scala:56:21]
assign io_iss_uop_exception_0 = slot_uop_exception; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_exception = slot_uop_exception; // @[util.scala:104:23]
reg [63:0] slot_uop_exc_cause; // @[issue-slot.scala:56:21]
assign io_iss_uop_exc_cause_0 = slot_uop_exc_cause; // @[issue-slot.scala:49:7, :56:21]
wire [63:0] next_uop_out_exc_cause = slot_uop_exc_cause; // @[util.scala:104:23]
reg [4:0] slot_uop_mem_cmd; // @[issue-slot.scala:56:21]
assign io_iss_uop_mem_cmd_0 = slot_uop_mem_cmd; // @[issue-slot.scala:49:7, :56:21]
wire [4:0] next_uop_out_mem_cmd = slot_uop_mem_cmd; // @[util.scala:104:23]
reg [1:0] slot_uop_mem_size; // @[issue-slot.scala:56:21]
assign io_iss_uop_mem_size_0 = slot_uop_mem_size; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_mem_size = slot_uop_mem_size; // @[util.scala:104:23]
reg slot_uop_mem_signed; // @[issue-slot.scala:56:21]
assign io_iss_uop_mem_signed_0 = slot_uop_mem_signed; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_mem_signed = slot_uop_mem_signed; // @[util.scala:104:23]
reg slot_uop_uses_ldq; // @[issue-slot.scala:56:21]
assign io_iss_uop_uses_ldq_0 = slot_uop_uses_ldq; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_uses_ldq = slot_uop_uses_ldq; // @[util.scala:104:23]
reg slot_uop_uses_stq; // @[issue-slot.scala:56:21]
assign io_iss_uop_uses_stq_0 = slot_uop_uses_stq; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_uses_stq = slot_uop_uses_stq; // @[util.scala:104:23]
reg slot_uop_is_unique; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_unique_0 = slot_uop_is_unique; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_unique = slot_uop_is_unique; // @[util.scala:104:23]
reg slot_uop_flush_on_commit; // @[issue-slot.scala:56:21]
assign io_iss_uop_flush_on_commit_0 = slot_uop_flush_on_commit; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_flush_on_commit = slot_uop_flush_on_commit; // @[util.scala:104:23]
reg [2:0] slot_uop_csr_cmd; // @[issue-slot.scala:56:21]
assign io_iss_uop_csr_cmd_0 = slot_uop_csr_cmd; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_csr_cmd = slot_uop_csr_cmd; // @[util.scala:104:23]
reg slot_uop_ldst_is_rs1; // @[issue-slot.scala:56:21]
assign io_iss_uop_ldst_is_rs1_0 = slot_uop_ldst_is_rs1; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_ldst_is_rs1 = slot_uop_ldst_is_rs1; // @[util.scala:104:23]
reg [5:0] slot_uop_ldst; // @[issue-slot.scala:56:21]
assign io_iss_uop_ldst_0 = slot_uop_ldst; // @[issue-slot.scala:49:7, :56:21]
wire [5:0] next_uop_out_ldst = slot_uop_ldst; // @[util.scala:104:23]
reg [5:0] slot_uop_lrs1; // @[issue-slot.scala:56:21]
assign io_iss_uop_lrs1_0 = slot_uop_lrs1; // @[issue-slot.scala:49:7, :56:21]
wire [5:0] next_uop_out_lrs1 = slot_uop_lrs1; // @[util.scala:104:23]
reg [5:0] slot_uop_lrs2; // @[issue-slot.scala:56:21]
assign io_iss_uop_lrs2_0 = slot_uop_lrs2; // @[issue-slot.scala:49:7, :56:21]
wire [5:0] next_uop_out_lrs2 = slot_uop_lrs2; // @[util.scala:104:23]
reg [5:0] slot_uop_lrs3; // @[issue-slot.scala:56:21]
assign io_iss_uop_lrs3_0 = slot_uop_lrs3; // @[issue-slot.scala:49:7, :56:21]
wire [5:0] next_uop_out_lrs3 = slot_uop_lrs3; // @[util.scala:104:23]
reg [1:0] slot_uop_dst_rtype; // @[issue-slot.scala:56:21]
assign io_iss_uop_dst_rtype_0 = slot_uop_dst_rtype; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_dst_rtype = slot_uop_dst_rtype; // @[util.scala:104:23]
reg [1:0] slot_uop_lrs1_rtype; // @[issue-slot.scala:56:21]
assign io_iss_uop_lrs1_rtype_0 = slot_uop_lrs1_rtype; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_lrs1_rtype = slot_uop_lrs1_rtype; // @[util.scala:104:23]
reg [1:0] slot_uop_lrs2_rtype; // @[issue-slot.scala:56:21]
assign io_iss_uop_lrs2_rtype_0 = slot_uop_lrs2_rtype; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_lrs2_rtype = slot_uop_lrs2_rtype; // @[util.scala:104:23]
reg slot_uop_frs3_en; // @[issue-slot.scala:56:21]
assign io_iss_uop_frs3_en_0 = slot_uop_frs3_en; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_frs3_en = slot_uop_frs3_en; // @[util.scala:104:23]
reg slot_uop_fcn_dw; // @[issue-slot.scala:56:21]
assign io_iss_uop_fcn_dw_0 = slot_uop_fcn_dw; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fcn_dw = slot_uop_fcn_dw; // @[util.scala:104:23]
reg [4:0] slot_uop_fcn_op; // @[issue-slot.scala:56:21]
assign io_iss_uop_fcn_op_0 = slot_uop_fcn_op; // @[issue-slot.scala:49:7, :56:21]
wire [4:0] next_uop_out_fcn_op = slot_uop_fcn_op; // @[util.scala:104:23]
reg slot_uop_fp_val; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_val_0 = slot_uop_fp_val; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_val = slot_uop_fp_val; // @[util.scala:104:23]
reg [2:0] slot_uop_fp_rm; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_rm_0 = slot_uop_fp_rm; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_fp_rm = slot_uop_fp_rm; // @[util.scala:104:23]
reg [1:0] slot_uop_fp_typ; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_typ_0 = slot_uop_fp_typ; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_fp_typ = slot_uop_fp_typ; // @[util.scala:104:23]
reg slot_uop_xcpt_pf_if; // @[issue-slot.scala:56:21]
assign io_iss_uop_xcpt_pf_if_0 = slot_uop_xcpt_pf_if; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_xcpt_pf_if = slot_uop_xcpt_pf_if; // @[util.scala:104:23]
reg slot_uop_xcpt_ae_if; // @[issue-slot.scala:56:21]
assign io_iss_uop_xcpt_ae_if_0 = slot_uop_xcpt_ae_if; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_xcpt_ae_if = slot_uop_xcpt_ae_if; // @[util.scala:104:23]
reg slot_uop_xcpt_ma_if; // @[issue-slot.scala:56:21]
assign io_iss_uop_xcpt_ma_if_0 = slot_uop_xcpt_ma_if; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_xcpt_ma_if = slot_uop_xcpt_ma_if; // @[util.scala:104:23]
reg slot_uop_bp_debug_if; // @[issue-slot.scala:56:21]
assign io_iss_uop_bp_debug_if_0 = slot_uop_bp_debug_if; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_bp_debug_if = slot_uop_bp_debug_if; // @[util.scala:104:23]
reg slot_uop_bp_xcpt_if; // @[issue-slot.scala:56:21]
assign io_iss_uop_bp_xcpt_if_0 = slot_uop_bp_xcpt_if; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_bp_xcpt_if = slot_uop_bp_xcpt_if; // @[util.scala:104:23]
reg [2:0] slot_uop_debug_fsrc; // @[issue-slot.scala:56:21]
assign io_iss_uop_debug_fsrc_0 = slot_uop_debug_fsrc; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_debug_fsrc = slot_uop_debug_fsrc; // @[util.scala:104:23]
reg [2:0] slot_uop_debug_tsrc; // @[issue-slot.scala:56:21]
assign io_iss_uop_debug_tsrc_0 = slot_uop_debug_tsrc; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_debug_tsrc = slot_uop_debug_tsrc; // @[util.scala:104:23]
wire next_valid; // @[issue-slot.scala:58:28]
assign next_uop_inst = next_uop_out_inst; // @[util.scala:104:23]
assign next_uop_debug_inst = next_uop_out_debug_inst; // @[util.scala:104:23]
assign next_uop_is_rvc = next_uop_out_is_rvc; // @[util.scala:104:23]
assign next_uop_debug_pc = next_uop_out_debug_pc; // @[util.scala:104:23]
assign next_uop_iq_type_0 = next_uop_out_iq_type_0; // @[util.scala:104:23]
assign next_uop_iq_type_1 = next_uop_out_iq_type_1; // @[util.scala:104:23]
assign next_uop_iq_type_2 = next_uop_out_iq_type_2; // @[util.scala:104:23]
assign next_uop_iq_type_3 = next_uop_out_iq_type_3; // @[util.scala:104:23]
assign next_uop_fu_code_0 = next_uop_out_fu_code_0; // @[util.scala:104:23]
assign next_uop_fu_code_1 = next_uop_out_fu_code_1; // @[util.scala:104:23]
assign next_uop_fu_code_2 = next_uop_out_fu_code_2; // @[util.scala:104:23]
assign next_uop_fu_code_3 = next_uop_out_fu_code_3; // @[util.scala:104:23]
assign next_uop_fu_code_4 = next_uop_out_fu_code_4; // @[util.scala:104:23]
assign next_uop_fu_code_5 = next_uop_out_fu_code_5; // @[util.scala:104:23]
assign next_uop_fu_code_6 = next_uop_out_fu_code_6; // @[util.scala:104:23]
assign next_uop_fu_code_7 = next_uop_out_fu_code_7; // @[util.scala:104:23]
assign next_uop_fu_code_8 = next_uop_out_fu_code_8; // @[util.scala:104:23]
assign next_uop_fu_code_9 = next_uop_out_fu_code_9; // @[util.scala:104:23]
wire [15:0] _next_uop_out_br_mask_T_1; // @[util.scala:93:25]
assign next_uop_dis_col_sel = next_uop_out_dis_col_sel; // @[util.scala:104:23]
assign next_uop_br_mask = next_uop_out_br_mask; // @[util.scala:104:23]
assign next_uop_br_tag = next_uop_out_br_tag; // @[util.scala:104:23]
assign next_uop_br_type = next_uop_out_br_type; // @[util.scala:104:23]
assign next_uop_is_sfb = next_uop_out_is_sfb; // @[util.scala:104:23]
assign next_uop_is_fence = next_uop_out_is_fence; // @[util.scala:104:23]
assign next_uop_is_fencei = next_uop_out_is_fencei; // @[util.scala:104:23]
assign next_uop_is_sfence = next_uop_out_is_sfence; // @[util.scala:104:23]
assign next_uop_is_amo = next_uop_out_is_amo; // @[util.scala:104:23]
assign next_uop_is_eret = next_uop_out_is_eret; // @[util.scala:104:23]
assign next_uop_is_sys_pc2epc = next_uop_out_is_sys_pc2epc; // @[util.scala:104:23]
assign next_uop_is_rocc = next_uop_out_is_rocc; // @[util.scala:104:23]
assign next_uop_is_mov = next_uop_out_is_mov; // @[util.scala:104:23]
assign next_uop_ftq_idx = next_uop_out_ftq_idx; // @[util.scala:104:23]
assign next_uop_edge_inst = next_uop_out_edge_inst; // @[util.scala:104:23]
assign next_uop_pc_lob = next_uop_out_pc_lob; // @[util.scala:104:23]
assign next_uop_taken = next_uop_out_taken; // @[util.scala:104:23]
assign next_uop_imm_rename = next_uop_out_imm_rename; // @[util.scala:104:23]
assign next_uop_imm_sel = next_uop_out_imm_sel; // @[util.scala:104:23]
assign next_uop_pimm = next_uop_out_pimm; // @[util.scala:104:23]
assign next_uop_imm_packed = next_uop_out_imm_packed; // @[util.scala:104:23]
assign next_uop_op1_sel = next_uop_out_op1_sel; // @[util.scala:104:23]
assign next_uop_op2_sel = next_uop_out_op2_sel; // @[util.scala:104:23]
assign next_uop_fp_ctrl_ldst = next_uop_out_fp_ctrl_ldst; // @[util.scala:104:23]
assign next_uop_fp_ctrl_wen = next_uop_out_fp_ctrl_wen; // @[util.scala:104:23]
assign next_uop_fp_ctrl_ren1 = next_uop_out_fp_ctrl_ren1; // @[util.scala:104:23]
assign next_uop_fp_ctrl_ren2 = next_uop_out_fp_ctrl_ren2; // @[util.scala:104:23]
assign next_uop_fp_ctrl_ren3 = next_uop_out_fp_ctrl_ren3; // @[util.scala:104:23]
assign next_uop_fp_ctrl_swap12 = next_uop_out_fp_ctrl_swap12; // @[util.scala:104:23]
assign next_uop_fp_ctrl_swap23 = next_uop_out_fp_ctrl_swap23; // @[util.scala:104:23]
assign next_uop_fp_ctrl_typeTagIn = next_uop_out_fp_ctrl_typeTagIn; // @[util.scala:104:23]
assign next_uop_fp_ctrl_typeTagOut = next_uop_out_fp_ctrl_typeTagOut; // @[util.scala:104:23]
assign next_uop_fp_ctrl_fromint = next_uop_out_fp_ctrl_fromint; // @[util.scala:104:23]
assign next_uop_fp_ctrl_toint = next_uop_out_fp_ctrl_toint; // @[util.scala:104:23]
assign next_uop_fp_ctrl_fastpipe = next_uop_out_fp_ctrl_fastpipe; // @[util.scala:104:23]
assign next_uop_fp_ctrl_fma = next_uop_out_fp_ctrl_fma; // @[util.scala:104:23]
assign next_uop_fp_ctrl_div = next_uop_out_fp_ctrl_div; // @[util.scala:104:23]
assign next_uop_fp_ctrl_sqrt = next_uop_out_fp_ctrl_sqrt; // @[util.scala:104:23]
assign next_uop_fp_ctrl_wflags = next_uop_out_fp_ctrl_wflags; // @[util.scala:104:23]
assign next_uop_fp_ctrl_vec = next_uop_out_fp_ctrl_vec; // @[util.scala:104:23]
assign next_uop_rob_idx = next_uop_out_rob_idx; // @[util.scala:104:23]
assign next_uop_ldq_idx = next_uop_out_ldq_idx; // @[util.scala:104:23]
assign next_uop_stq_idx = next_uop_out_stq_idx; // @[util.scala:104:23]
assign next_uop_rxq_idx = next_uop_out_rxq_idx; // @[util.scala:104:23]
assign next_uop_pdst = next_uop_out_pdst; // @[util.scala:104:23]
assign next_uop_prs1 = next_uop_out_prs1; // @[util.scala:104:23]
assign next_uop_prs2 = next_uop_out_prs2; // @[util.scala:104:23]
assign next_uop_prs3 = next_uop_out_prs3; // @[util.scala:104:23]
assign next_uop_ppred = next_uop_out_ppred; // @[util.scala:104:23]
assign next_uop_stale_pdst = next_uop_out_stale_pdst; // @[util.scala:104:23]
assign next_uop_exception = next_uop_out_exception; // @[util.scala:104:23]
assign next_uop_exc_cause = next_uop_out_exc_cause; // @[util.scala:104:23]
assign next_uop_mem_cmd = next_uop_out_mem_cmd; // @[util.scala:104:23]
assign next_uop_mem_size = next_uop_out_mem_size; // @[util.scala:104:23]
assign next_uop_mem_signed = next_uop_out_mem_signed; // @[util.scala:104:23]
assign next_uop_uses_ldq = next_uop_out_uses_ldq; // @[util.scala:104:23]
assign next_uop_uses_stq = next_uop_out_uses_stq; // @[util.scala:104:23]
assign next_uop_is_unique = next_uop_out_is_unique; // @[util.scala:104:23]
assign next_uop_flush_on_commit = next_uop_out_flush_on_commit; // @[util.scala:104:23]
assign next_uop_csr_cmd = next_uop_out_csr_cmd; // @[util.scala:104:23]
assign next_uop_ldst_is_rs1 = next_uop_out_ldst_is_rs1; // @[util.scala:104:23]
assign next_uop_ldst = next_uop_out_ldst; // @[util.scala:104:23]
assign next_uop_lrs1 = next_uop_out_lrs1; // @[util.scala:104:23]
assign next_uop_lrs2 = next_uop_out_lrs2; // @[util.scala:104:23]
assign next_uop_lrs3 = next_uop_out_lrs3; // @[util.scala:104:23]
assign next_uop_dst_rtype = next_uop_out_dst_rtype; // @[util.scala:104:23]
assign next_uop_lrs1_rtype = next_uop_out_lrs1_rtype; // @[util.scala:104:23]
assign next_uop_lrs2_rtype = next_uop_out_lrs2_rtype; // @[util.scala:104:23]
assign next_uop_frs3_en = next_uop_out_frs3_en; // @[util.scala:104:23]
assign next_uop_fcn_dw = next_uop_out_fcn_dw; // @[util.scala:104:23]
assign next_uop_fcn_op = next_uop_out_fcn_op; // @[util.scala:104:23]
assign next_uop_fp_val = next_uop_out_fp_val; // @[util.scala:104:23]
assign next_uop_fp_rm = next_uop_out_fp_rm; // @[util.scala:104:23]
assign next_uop_fp_typ = next_uop_out_fp_typ; // @[util.scala:104:23]
assign next_uop_xcpt_pf_if = next_uop_out_xcpt_pf_if; // @[util.scala:104:23]
assign next_uop_xcpt_ae_if = next_uop_out_xcpt_ae_if; // @[util.scala:104:23]
assign next_uop_xcpt_ma_if = next_uop_out_xcpt_ma_if; // @[util.scala:104:23]
assign next_uop_bp_debug_if = next_uop_out_bp_debug_if; // @[util.scala:104:23]
assign next_uop_bp_xcpt_if = next_uop_out_bp_xcpt_if; // @[util.scala:104:23]
assign next_uop_debug_fsrc = next_uop_out_debug_fsrc; // @[util.scala:104:23]
assign next_uop_debug_tsrc = next_uop_out_debug_tsrc; // @[util.scala:104:23]
wire [15:0] _next_uop_out_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:93:27]
assign _next_uop_out_br_mask_T_1 = slot_uop_br_mask & _next_uop_out_br_mask_T; // @[util.scala:93:{25,27}]
assign next_uop_out_br_mask = _next_uop_out_br_mask_T_1; // @[util.scala:93:25, :104:23]
assign io_out_uop_inst_0 = next_uop_inst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_debug_inst_0 = next_uop_debug_inst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_rvc_0 = next_uop_is_rvc; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_debug_pc_0 = next_uop_debug_pc; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iq_type_0_0 = next_uop_iq_type_0; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iq_type_1_0 = next_uop_iq_type_1; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iq_type_2_0 = next_uop_iq_type_2; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iq_type_3_0 = next_uop_iq_type_3; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_0_0 = next_uop_fu_code_0; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_1_0 = next_uop_fu_code_1; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_2_0 = next_uop_fu_code_2; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_3_0 = next_uop_fu_code_3; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_4_0 = next_uop_fu_code_4; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_5_0 = next_uop_fu_code_5; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_6_0 = next_uop_fu_code_6; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_7_0 = next_uop_fu_code_7; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_8_0 = next_uop_fu_code_8; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_9_0 = next_uop_fu_code_9; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iw_issued_0 = next_uop_iw_issued; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iw_p1_speculative_child_0 = next_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iw_p2_speculative_child_0 = next_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iw_p1_bypass_hint_0 = next_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iw_p2_bypass_hint_0 = next_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iw_p3_bypass_hint_0 = next_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_dis_col_sel_0 = next_uop_dis_col_sel; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_br_mask_0 = next_uop_br_mask; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_br_tag_0 = next_uop_br_tag; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_br_type_0 = next_uop_br_type; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_sfb_0 = next_uop_is_sfb; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_fence_0 = next_uop_is_fence; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_fencei_0 = next_uop_is_fencei; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_sfence_0 = next_uop_is_sfence; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_amo_0 = next_uop_is_amo; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_eret_0 = next_uop_is_eret; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_sys_pc2epc_0 = next_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_rocc_0 = next_uop_is_rocc; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_mov_0 = next_uop_is_mov; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_ftq_idx_0 = next_uop_ftq_idx; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_edge_inst_0 = next_uop_edge_inst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_pc_lob_0 = next_uop_pc_lob; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_taken_0 = next_uop_taken; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_imm_rename_0 = next_uop_imm_rename; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_imm_sel_0 = next_uop_imm_sel; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_pimm_0 = next_uop_pimm; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_imm_packed_0 = next_uop_imm_packed; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_op1_sel_0 = next_uop_op1_sel; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_op2_sel_0 = next_uop_op2_sel; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_ldst_0 = next_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_wen_0 = next_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_ren1_0 = next_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_ren2_0 = next_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_ren3_0 = next_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_swap12_0 = next_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_swap23_0 = next_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_typeTagIn_0 = next_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_typeTagOut_0 = next_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_fromint_0 = next_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_toint_0 = next_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_fastpipe_0 = next_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_fma_0 = next_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_div_0 = next_uop_fp_ctrl_div; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_sqrt_0 = next_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_wflags_0 = next_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_vec_0 = next_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_rob_idx_0 = next_uop_rob_idx; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_ldq_idx_0 = next_uop_ldq_idx; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_stq_idx_0 = next_uop_stq_idx; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_rxq_idx_0 = next_uop_rxq_idx; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_pdst_0 = next_uop_pdst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_prs1_0 = next_uop_prs1; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_prs2_0 = next_uop_prs2; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_prs3_0 = next_uop_prs3; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_ppred_0 = next_uop_ppred; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_prs1_busy_0 = next_uop_prs1_busy; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_prs2_busy_0 = next_uop_prs2_busy; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_prs3_busy_0 = next_uop_prs3_busy; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_ppred_busy_0 = next_uop_ppred_busy; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_stale_pdst_0 = next_uop_stale_pdst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_exception_0 = next_uop_exception; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_exc_cause_0 = next_uop_exc_cause; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_mem_cmd_0 = next_uop_mem_cmd; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_mem_size_0 = next_uop_mem_size; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_mem_signed_0 = next_uop_mem_signed; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_uses_ldq_0 = next_uop_uses_ldq; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_uses_stq_0 = next_uop_uses_stq; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_unique_0 = next_uop_is_unique; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_flush_on_commit_0 = next_uop_flush_on_commit; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_csr_cmd_0 = next_uop_csr_cmd; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_ldst_is_rs1_0 = next_uop_ldst_is_rs1; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_ldst_0 = next_uop_ldst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_lrs1_0 = next_uop_lrs1; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_lrs2_0 = next_uop_lrs2; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_lrs3_0 = next_uop_lrs3; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_dst_rtype_0 = next_uop_dst_rtype; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_lrs1_rtype_0 = next_uop_lrs1_rtype; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_lrs2_rtype_0 = next_uop_lrs2_rtype; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_frs3_en_0 = next_uop_frs3_en; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fcn_dw_0 = next_uop_fcn_dw; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fcn_op_0 = next_uop_fcn_op; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_val_0 = next_uop_fp_val; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_rm_0 = next_uop_fp_rm; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_typ_0 = next_uop_fp_typ; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_xcpt_pf_if_0 = next_uop_xcpt_pf_if; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_xcpt_ae_if_0 = next_uop_xcpt_ae_if; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_xcpt_ma_if_0 = next_uop_xcpt_ma_if; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_bp_debug_if_0 = next_uop_bp_debug_if; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_bp_xcpt_if_0 = next_uop_bp_xcpt_if; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_debug_fsrc_0 = next_uop_debug_fsrc; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_debug_tsrc_0 = next_uop_debug_tsrc; // @[issue-slot.scala:49:7, :59:28]
wire [15:0] _killed_T = io_brupdate_b1_mispredict_mask_0 & slot_uop_br_mask; // @[util.scala:126:51]
wire _killed_T_1 = |_killed_T; // @[util.scala:126:{51,59}]
wire killed = _killed_T_1 | io_kill_0; // @[util.scala:61:61, :126:59]
wire _io_will_be_valid_T = ~killed; // @[util.scala:61:61]
assign _io_will_be_valid_T_1 = next_valid & _io_will_be_valid_T; // @[issue-slot.scala:58:28, :65:{34,37}]
assign io_will_be_valid_0 = _io_will_be_valid_T_1; // @[issue-slot.scala:49:7, :65:34]
wire _slot_valid_T = ~killed; // @[util.scala:61:61]
wire _slot_valid_T_1 = next_valid & _slot_valid_T; // @[issue-slot.scala:58:28, :74:{30,33}] |
Generate the Verilog code corresponding to the following Chisel files.
File FPU.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tile
import chisel3._
import chisel3.util._
import chisel3.{DontCare, WireInit, withClock, withReset}
import chisel3.experimental.SourceInfo
import chisel3.experimental.dataview._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.rocket._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.util._
import freechips.rocketchip.util.property
case class FPUParams(
minFLen: Int = 32,
fLen: Int = 64,
divSqrt: Boolean = true,
sfmaLatency: Int = 3,
dfmaLatency: Int = 4,
fpmuLatency: Int = 2,
ifpuLatency: Int = 2
)
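// The latency fields above are pipeline depths in cycles. A minimal sketch of a
// non-default configuration (the surrounding config plumbing is assumed and not
// shown in this file):
//
//   val fpuParams = FPUParams(minFLen = 16, sfmaLatency = 4, dfmaLatency = 5)
//
// Setting minFLen = 16 also pulls in the half-precision decode table below.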
object FPConstants
{
val RM_SZ = 3
val FLAGS_SZ = 5
}
trait HasFPUCtrlSigs {
val ldst = Bool()
val wen = Bool()
val ren1 = Bool()
val ren2 = Bool()
val ren3 = Bool()
val swap12 = Bool()
val swap23 = Bool()
val typeTagIn = UInt(2.W)
val typeTagOut = UInt(2.W)
val fromint = Bool()
val toint = Bool()
val fastpipe = Bool()
val fma = Bool()
val div = Bool()
val sqrt = Bool()
val wflags = Bool()
val vec = Bool()
}
class FPUCtrlSigs extends Bundle with HasFPUCtrlSigs
class FPUDecoder(implicit p: Parameters) extends FPUModule()(p) {
val io = IO(new Bundle {
val inst = Input(Bits(32.W))
val sigs = Output(new FPUCtrlSigs())
})
private val X2 = BitPat.dontCare(2)
val default = List(X,X,X,X,X,X,X,X2,X2,X,X,X,X,X,X,X,N)
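  // Each decode row lists its control signals in the same order as the fields of
  // HasFPUCtrlSigs (and as the `sigs` Seq at the bottom of this module):
  //   ldst, wen, ren1, ren2, ren3, swap12, swap23, typeTagIn, typeTagOut,
  //   fromint, toint, fastpipe, fma, div, sqrt, wflags, vec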
val h: Array[(BitPat, List[BitPat])] =
Array(FLH -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N),
FSH -> List(Y,N,N,Y,N,Y,X, I, H,N,Y,N,N,N,N,N,N),
FMV_H_X -> List(N,Y,N,N,N,X,X, H, I,Y,N,N,N,N,N,N,N),
FCVT_H_W -> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
FCVT_H_WU-> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
FCVT_H_L -> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
FCVT_H_LU-> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
FMV_X_H -> List(N,N,Y,N,N,N,X, I, H,N,Y,N,N,N,N,N,N),
FCLASS_H -> List(N,N,Y,N,N,N,X, H, H,N,Y,N,N,N,N,N,N),
FCVT_W_H -> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
FCVT_WU_H-> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
FCVT_L_H -> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
FCVT_LU_H-> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
FCVT_S_H -> List(N,Y,Y,N,N,N,X, H, S,N,N,Y,N,N,N,Y,N),
FCVT_H_S -> List(N,Y,Y,N,N,N,X, S, H,N,N,Y,N,N,N,Y,N),
FEQ_H -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N),
FLT_H -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N),
FLE_H -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N),
FSGNJ_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N),
FSGNJN_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N),
FSGNJX_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N),
FMIN_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,Y,N),
FMAX_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,Y,N),
FADD_H -> List(N,Y,Y,Y,N,N,Y, H, H,N,N,N,Y,N,N,Y,N),
FSUB_H -> List(N,Y,Y,Y,N,N,Y, H, H,N,N,N,Y,N,N,Y,N),
FMUL_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,N,Y,N,N,Y,N),
FMADD_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
FMSUB_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
FNMADD_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
FNMSUB_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
FDIV_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,N,N,Y,N,Y,N),
FSQRT_H -> List(N,Y,Y,N,N,N,X, H, H,N,N,N,N,N,Y,Y,N))
val f: Array[(BitPat, List[BitPat])] =
Array(FLW -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N),
FSW -> List(Y,N,N,Y,N,Y,X, I, S,N,Y,N,N,N,N,N,N),
FMV_W_X -> List(N,Y,N,N,N,X,X, S, I,Y,N,N,N,N,N,N,N),
FCVT_S_W -> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
FCVT_S_WU-> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
FCVT_S_L -> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
FCVT_S_LU-> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
FMV_X_W -> List(N,N,Y,N,N,N,X, I, S,N,Y,N,N,N,N,N,N),
FCLASS_S -> List(N,N,Y,N,N,N,X, S, S,N,Y,N,N,N,N,N,N),
FCVT_W_S -> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
FCVT_WU_S-> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
FCVT_L_S -> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
FCVT_LU_S-> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
FEQ_S -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N),
FLT_S -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N),
FLE_S -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N),
FSGNJ_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N),
FSGNJN_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N),
FSGNJX_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N),
FMIN_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,Y,N),
FMAX_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,Y,N),
FADD_S -> List(N,Y,Y,Y,N,N,Y, S, S,N,N,N,Y,N,N,Y,N),
FSUB_S -> List(N,Y,Y,Y,N,N,Y, S, S,N,N,N,Y,N,N,Y,N),
FMUL_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,N,Y,N,N,Y,N),
FMADD_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
FMSUB_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
FNMADD_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
FNMSUB_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
FDIV_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,N,N,Y,N,Y,N),
FSQRT_S -> List(N,Y,Y,N,N,N,X, S, S,N,N,N,N,N,Y,Y,N))
val d: Array[(BitPat, List[BitPat])] =
Array(FLD -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N),
FSD -> List(Y,N,N,Y,N,Y,X, I, D,N,Y,N,N,N,N,N,N),
FMV_D_X -> List(N,Y,N,N,N,X,X, D, I,Y,N,N,N,N,N,N,N),
FCVT_D_W -> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
FCVT_D_WU-> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
FCVT_D_L -> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
FCVT_D_LU-> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
FMV_X_D -> List(N,N,Y,N,N,N,X, I, D,N,Y,N,N,N,N,N,N),
FCLASS_D -> List(N,N,Y,N,N,N,X, D, D,N,Y,N,N,N,N,N,N),
FCVT_W_D -> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
FCVT_WU_D-> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
FCVT_L_D -> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
FCVT_LU_D-> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
FCVT_S_D -> List(N,Y,Y,N,N,N,X, D, S,N,N,Y,N,N,N,Y,N),
FCVT_D_S -> List(N,Y,Y,N,N,N,X, S, D,N,N,Y,N,N,N,Y,N),
FEQ_D -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N),
FLT_D -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N),
FLE_D -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N),
FSGNJ_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N),
FSGNJN_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N),
FSGNJX_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N),
FMIN_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,Y,N),
FMAX_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,Y,N),
FADD_D -> List(N,Y,Y,Y,N,N,Y, D, D,N,N,N,Y,N,N,Y,N),
FSUB_D -> List(N,Y,Y,Y,N,N,Y, D, D,N,N,N,Y,N,N,Y,N),
FMUL_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,N,Y,N,N,Y,N),
FMADD_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
FMSUB_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
FNMADD_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
FNMSUB_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
FDIV_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,N,N,Y,N,Y,N),
FSQRT_D -> List(N,Y,Y,N,N,N,X, D, D,N,N,N,N,N,Y,Y,N))
val fcvt_hd: Array[(BitPat, List[BitPat])] =
Array(FCVT_H_D -> List(N,Y,Y,N,N,N,X, D, H,N,N,Y,N,N,N,Y,N),
FCVT_D_H -> List(N,Y,Y,N,N,N,X, H, D,N,N,Y,N,N,N,Y,N))
val vfmv_f_s: Array[(BitPat, List[BitPat])] =
Array(VFMV_F_S -> List(N,Y,N,N,N,N,X,X2,X2,N,N,N,N,N,N,N,Y))
val insns = ((minFLen, fLen) match {
case (32, 32) => f
case (16, 32) => h ++ f
case (32, 64) => f ++ d
case (16, 64) => h ++ f ++ d ++ fcvt_hd
case other => throw new Exception(s"minFLen = ${minFLen} & fLen = ${fLen} is an unsupported configuration")
}) ++ (if (usingVector) vfmv_f_s else Array[(BitPat, List[BitPat])]())
val decoder = DecodeLogic(io.inst, default, insns)
val s = io.sigs
val sigs = Seq(s.ldst, s.wen, s.ren1, s.ren2, s.ren3, s.swap12,
s.swap23, s.typeTagIn, s.typeTagOut, s.fromint, s.toint,
s.fastpipe, s.fma, s.div, s.sqrt, s.wflags, s.vec)
sigs zip decoder map {case(s,d) => s := d}
}
class FPUCoreIO(implicit p: Parameters) extends CoreBundle()(p) {
val hartid = Input(UInt(hartIdLen.W))
val time = Input(UInt(xLen.W))
val inst = Input(Bits(32.W))
val fromint_data = Input(Bits(xLen.W))
val fcsr_rm = Input(Bits(FPConstants.RM_SZ.W))
val fcsr_flags = Valid(Bits(FPConstants.FLAGS_SZ.W))
val v_sew = Input(UInt(3.W))
val store_data = Output(Bits(fLen.W))
val toint_data = Output(Bits(xLen.W))
val ll_resp_val = Input(Bool())
val ll_resp_type = Input(Bits(3.W))
val ll_resp_tag = Input(UInt(5.W))
val ll_resp_data = Input(Bits(fLen.W))
val valid = Input(Bool())
val fcsr_rdy = Output(Bool())
val nack_mem = Output(Bool())
val illegal_rm = Output(Bool())
val killx = Input(Bool())
val killm = Input(Bool())
val dec = Output(new FPUCtrlSigs())
val sboard_set = Output(Bool())
val sboard_clr = Output(Bool())
val sboard_clra = Output(UInt(5.W))
val keep_clock_enabled = Input(Bool())
}
class FPUIO(implicit p: Parameters) extends FPUCoreIO ()(p) {
  val cp_req = Flipped(Decoupled(new FPInput())) // the coprocessor request port does not pay attention to kill signals
val cp_resp = Decoupled(new FPResult())
}
class FPResult(implicit p: Parameters) extends CoreBundle()(p) {
val data = Bits((fLen+1).W)
val exc = Bits(FPConstants.FLAGS_SZ.W)
}
class IntToFPInput(implicit p: Parameters) extends CoreBundle()(p) with HasFPUCtrlSigs {
val rm = Bits(FPConstants.RM_SZ.W)
val typ = Bits(2.W)
val in1 = Bits(xLen.W)
}
class FPInput(implicit p: Parameters) extends CoreBundle()(p) with HasFPUCtrlSigs {
val rm = Bits(FPConstants.RM_SZ.W)
val fmaCmd = Bits(2.W)
val typ = Bits(2.W)
val fmt = Bits(2.W)
val in1 = Bits((fLen+1).W)
val in2 = Bits((fLen+1).W)
val in3 = Bits((fLen+1).W)
}
case class FType(exp: Int, sig: Int) {
def ieeeWidth = exp + sig
def recodedWidth = ieeeWidth + 1
def ieeeQNaN = ((BigInt(1) << (ieeeWidth - 1)) - (BigInt(1) << (sig - 2))).U(ieeeWidth.W)
def qNaN = ((BigInt(7) << (exp + sig - 3)) + (BigInt(1) << (sig - 2))).U(recodedWidth.W)
def isNaN(x: UInt) = x(sig + exp - 1, sig + exp - 3).andR
def isSNaN(x: UInt) = isNaN(x) && !x(sig - 2)
def classify(x: UInt) = {
val sign = x(sig + exp)
val code = x(exp + sig - 1, exp + sig - 3)
val codeHi = code(2, 1)
val isSpecial = codeHi === 3.U
val isHighSubnormalIn = x(exp + sig - 3, sig - 1) < 2.U
val isSubnormal = code === 1.U || codeHi === 1.U && isHighSubnormalIn
val isNormal = codeHi === 1.U && !isHighSubnormalIn || codeHi === 2.U
val isZero = code === 0.U
val isInf = isSpecial && !code(0)
val isNaN = code.andR
val isSNaN = isNaN && !x(sig-2)
val isQNaN = isNaN && x(sig-2)
Cat(isQNaN, isSNaN, isInf && !sign, isNormal && !sign,
isSubnormal && !sign, isZero && !sign, isZero && sign,
isSubnormal && sign, isNormal && sign, isInf && sign)
}
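  // classify produces the 10-bit RISC-V FCLASS mask; reading the Cat above from
  // MSB to LSB: bit 9 = quiet NaN, 8 = signaling NaN, 7 = +inf, 6 = +normal,
  // 5 = +subnormal, 4 = +0, 3 = -0, 2 = -subnormal, 1 = -normal, 0 = -inf.
  // For example, a recoded 1.0 is a positive normal, so classify returns 0x040.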
// convert between formats, ignoring rounding, range, NaN
def unsafeConvert(x: UInt, to: FType) = if (this == to) x else {
val sign = x(sig + exp)
val fractIn = x(sig - 2, 0)
val expIn = x(sig + exp - 1, sig - 1)
val fractOut = fractIn << to.sig >> sig
val expOut = {
val expCode = expIn(exp, exp - 2)
val commonCase = (expIn + (1 << to.exp).U) - (1 << exp).U
Mux(expCode === 0.U || expCode >= 6.U, Cat(expCode, commonCase(to.exp - 3, 0)), commonCase(to.exp, 0))
}
Cat(sign, expOut, fractOut)
}
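  // Worked example of the re-bias above: widening S (exp = 8) to D (exp = 11),
  // a normal recoded exponent is adjusted by (1 << 11) - (1 << 8) = 1792, while
  // the top three exponent-code bits (expCode 0 or >= 6, i.e. zero/inf/NaN) are
  // carried over unchanged.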
private def ieeeBundle = {
val expWidth = exp
class IEEEBundle extends Bundle {
val sign = Bool()
val exp = UInt(expWidth.W)
val sig = UInt((ieeeWidth-expWidth-1).W)
}
new IEEEBundle
}
def unpackIEEE(x: UInt) = x.asTypeOf(ieeeBundle)
def recode(x: UInt) = hardfloat.recFNFromFN(exp, sig, x)
def ieee(x: UInt) = hardfloat.fNFromRecFN(exp, sig, x)
}
object FType {
val H = new FType(5, 11)
val S = new FType(8, 24)
val D = new FType(11, 53)
val all = List(H, S, D)
}
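// With these parameters: H is 16 bits IEEE / 17 bits recoded, S is 32 / 33, and
// D is 64 / 65 (recodedWidth = exp + sig + 1).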
trait HasFPUParameters {
require(fLen == 0 || FType.all.exists(_.ieeeWidth == fLen))
val minFLen: Int
val fLen: Int
def xLen: Int
val minXLen = 32
val nIntTypes = log2Ceil(xLen/minXLen) + 1
def floatTypes = FType.all.filter(t => minFLen <= t.ieeeWidth && t.ieeeWidth <= fLen)
def minType = floatTypes.head
def maxType = floatTypes.last
def prevType(t: FType) = floatTypes(typeTag(t) - 1)
def maxExpWidth = maxType.exp
def maxSigWidth = maxType.sig
def typeTag(t: FType) = floatTypes.indexOf(t)
def typeTagWbOffset = (FType.all.indexOf(minType) + 1).U
def typeTagGroup(t: FType) = (if (floatTypes.contains(t)) typeTag(t) else typeTag(maxType)).U
// typeTag
def H = typeTagGroup(FType.H)
def S = typeTagGroup(FType.S)
def D = typeTagGroup(FType.D)
def I = typeTag(maxType).U
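  // Worked example, assuming the default minFLen = 16 / fLen = 64 configuration:
  // floatTypes = List(H, S, D), so typeTag gives H = 0, S = 1, D = 2, and the
  // integer tag I equals typeTag(maxType) = 2.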
private def isBox(x: UInt, t: FType): Bool = x(t.sig + t.exp, t.sig + t.exp - 4).andR
private def box(x: UInt, xt: FType, y: UInt, yt: FType): UInt = {
require(xt.ieeeWidth == 2 * yt.ieeeWidth)
val swizzledNaN = Cat(
x(xt.sig + xt.exp, xt.sig + xt.exp - 3),
x(xt.sig - 2, yt.recodedWidth - 1).andR,
x(xt.sig + xt.exp - 5, xt.sig),
y(yt.recodedWidth - 2),
x(xt.sig - 2, yt.recodedWidth - 1),
y(yt.recodedWidth - 1),
y(yt.recodedWidth - 3, 0))
Mux(xt.isNaN(x), swizzledNaN, x)
}
// implement NaN unboxing for FU inputs
def unbox(x: UInt, tag: UInt, exactType: Option[FType]): UInt = {
val outType = exactType.getOrElse(maxType)
def helper(x: UInt, t: FType): Seq[(Bool, UInt)] = {
val prev =
if (t == minType) {
Seq()
} else {
val prevT = prevType(t)
val unswizzled = Cat(
x(prevT.sig + prevT.exp - 1),
x(t.sig - 1),
x(prevT.sig + prevT.exp - 2, 0))
val prev = helper(unswizzled, prevT)
val isbox = isBox(x, t)
prev.map(p => (isbox && p._1, p._2))
}
prev :+ (true.B, t.unsafeConvert(x, outType))
}
val (oks, floats) = helper(x, maxType).unzip
if (exactType.isEmpty || floatTypes.size == 1) {
Mux(oks(tag), floats(tag), maxType.qNaN)
} else {
val t = exactType.get
floats(typeTag(t)) | Mux(oks(typeTag(t)), 0.U, t.qNaN)
}
}
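  // E.g. with fLen = 64, a single-precision operand is used as-is only if the
  // wider register value passes the isBox check; otherwise the result is forced
  // to a quiet NaN, which is how RISC-V requires improperly NaN-boxed narrower
  // values to behave.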
// make sure that the redundant bits in the NaN-boxed encoding are consistent
def consistent(x: UInt): Bool = {
def helper(x: UInt, t: FType): Bool = if (typeTag(t) == 0) true.B else {
val prevT = prevType(t)
val unswizzled = Cat(
x(prevT.sig + prevT.exp - 1),
x(t.sig - 1),
x(prevT.sig + prevT.exp - 2, 0))
val prevOK = !isBox(x, t) || helper(unswizzled, prevT)
val curOK = !t.isNaN(x) || x(t.sig + t.exp - 4) === x(t.sig - 2, prevT.recodedWidth - 1).andR
prevOK && curOK
}
helper(x, maxType)
}
// generate a NaN box from an FU result
def box(x: UInt, t: FType): UInt = {
if (t == maxType) {
x
} else {
val nt = floatTypes(typeTag(t) + 1)
val bigger = box(((BigInt(1) << nt.recodedWidth)-1).U, nt, x, t)
bigger | ((BigInt(1) << maxType.recodedWidth) - (BigInt(1) << nt.recodedWidth)).U
}
}
// generate a NaN box from an FU result
def box(x: UInt, tag: UInt): UInt = {
val opts = floatTypes.map(t => box(x, t))
opts(tag)
}
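  // In the IEEE view this is the standard RISC-V NaN-boxing: e.g. a
  // single-precision result written to a 64-bit f-register comes back from
  // ieee() with bits [63:32] all ones.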
// zap bits that hardfloat thinks are don't-cares, but we do care about
def sanitizeNaN(x: UInt, t: FType): UInt = {
if (typeTag(t) == 0) {
x
} else {
val maskedNaN = x & ~((BigInt(1) << (t.sig-1)) | (BigInt(1) << (t.sig+t.exp-4))).U(t.recodedWidth.W)
Mux(t.isNaN(x), maskedNaN, x)
}
}
// implement NaN boxing and recoding for FL*/fmv.*.x
def recode(x: UInt, tag: UInt): UInt = {
def helper(x: UInt, t: FType): UInt = {
if (typeTag(t) == 0) {
t.recode(x)
} else {
val prevT = prevType(t)
box(t.recode(x), t, helper(x, prevT), prevT)
}
}
// fill MSBs of subword loads to emulate a wider load of a NaN-boxed value
val boxes = floatTypes.map(t => ((BigInt(1) << maxType.ieeeWidth) - (BigInt(1) << t.ieeeWidth)).U)
helper(boxes(tag) | x, maxType)
}
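  // E.g. with fLen = 64, boxes(typeTag(S)) = 0xFFFFFFFF_00000000, so a 32-bit
  // load is first widened to look like a properly NaN-boxed single and then
  // recoded at the full width.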
// implement NaN unboxing and un-recoding for FS*/fmv.x.*
def ieee(x: UInt, t: FType = maxType): UInt = {
if (typeTag(t) == 0) {
t.ieee(x)
} else {
val unrecoded = t.ieee(x)
val prevT = prevType(t)
val prevRecoded = Cat(
x(prevT.recodedWidth-2),
x(t.sig-1),
x(prevT.recodedWidth-3, 0))
val prevUnrecoded = ieee(prevRecoded, prevT)
Cat(unrecoded >> prevT.ieeeWidth, Mux(t.isNaN(x), prevUnrecoded, unrecoded(prevT.ieeeWidth-1, 0)))
}
}
}
abstract class FPUModule(implicit val p: Parameters) extends Module with HasCoreParameters with HasFPUParameters
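// FPToInt implements the to-integer side of the FPU: fmv.x.* and fclass, the
// store data path, the comparisons feq/flt/fle, and fcvt to integer. Inputs are
// registered on io.in.valid and the result appears on io.out one cycle later.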
class FPToInt(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
class Output extends Bundle {
val in = new FPInput
val lt = Bool()
val store = Bits(fLen.W)
val toint = Bits(xLen.W)
val exc = Bits(FPConstants.FLAGS_SZ.W)
}
val io = IO(new Bundle {
val in = Flipped(Valid(new FPInput))
val out = Valid(new Output)
})
val in = RegEnable(io.in.bits, io.in.valid)
val valid = RegNext(io.in.valid)
val dcmp = Module(new hardfloat.CompareRecFN(maxExpWidth, maxSigWidth))
dcmp.io.a := in.in1
dcmp.io.b := in.in2
dcmp.io.signaling := !in.rm(1)
val tag = in.typeTagOut
val toint_ieee = (floatTypes.map(t => if (t == FType.H) Fill(maxType.ieeeWidth / minXLen, ieee(in.in1)(15, 0).sextTo(minXLen))
else Fill(maxType.ieeeWidth / t.ieeeWidth, ieee(in.in1)(t.ieeeWidth - 1, 0))): Seq[UInt])(tag)
val toint = WireDefault(toint_ieee)
val intType = WireDefault(in.fmt(0))
io.out.bits.store := (floatTypes.map(t => Fill(fLen / t.ieeeWidth, ieee(in.in1)(t.ieeeWidth - 1, 0))): Seq[UInt])(tag)
io.out.bits.toint := ((0 until nIntTypes).map(i => toint((minXLen << i) - 1, 0).sextTo(xLen)): Seq[UInt])(intType)
io.out.bits.exc := 0.U
when (in.rm(0)) {
val classify_out = (floatTypes.map(t => t.classify(maxType.unsafeConvert(in.in1, t))): Seq[UInt])(tag)
toint := classify_out | (toint_ieee >> minXLen << minXLen)
intType := false.B
}
when (in.wflags) { // feq/flt/fle, fcvt
toint := (~in.rm & Cat(dcmp.io.lt, dcmp.io.eq)).orR | (toint_ieee >> minXLen << minXLen)
io.out.bits.exc := dcmp.io.exceptionFlags
intType := false.B
when (!in.ren2) { // fcvt
val cvtType = in.typ.extract(log2Ceil(nIntTypes), 1)
intType := cvtType
val conv = Module(new hardfloat.RecFNToIN(maxExpWidth, maxSigWidth, xLen))
conv.io.in := in.in1
conv.io.roundingMode := in.rm
conv.io.signedOut := ~in.typ(0)
toint := conv.io.out
io.out.bits.exc := Cat(conv.io.intExceptionFlags(2, 1).orR, 0.U(3.W), conv.io.intExceptionFlags(0))
for (i <- 0 until nIntTypes-1) {
val w = minXLen << i
when (cvtType === i.U) {
val narrow = Module(new hardfloat.RecFNToIN(maxExpWidth, maxSigWidth, w))
narrow.io.in := in.in1
narrow.io.roundingMode := in.rm
narrow.io.signedOut := ~in.typ(0)
val excSign = in.in1(maxExpWidth + maxSigWidth) && !maxType.isNaN(in.in1)
val excOut = Cat(conv.io.signedOut === excSign, Fill(w-1, !excSign))
val invalid = conv.io.intExceptionFlags(2) || narrow.io.intExceptionFlags(1)
when (invalid) { toint := Cat(conv.io.out >> w, excOut) }
io.out.bits.exc := Cat(invalid, 0.U(3.W), !invalid && conv.io.intExceptionFlags(0))
}
}
}
}
io.out.valid := valid
io.out.bits.lt := dcmp.io.lt || (dcmp.io.a.asSInt < 0.S && dcmp.io.b.asSInt >= 0.S)
io.out.bits.in := in
}
class IntToFP(val latency: Int)(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
val io = IO(new Bundle {
val in = Flipped(Valid(new IntToFPInput))
val out = Valid(new FPResult)
})
val in = Pipe(io.in)
val tag = in.bits.typeTagIn
val mux = Wire(new FPResult)
mux.exc := 0.U
mux.data := recode(in.bits.in1, tag)
val intValue = {
val res = WireDefault(in.bits.in1.asSInt)
for (i <- 0 until nIntTypes-1) {
val smallInt = in.bits.in1((minXLen << i) - 1, 0)
when (in.bits.typ.extract(log2Ceil(nIntTypes), 1) === i.U) {
res := Mux(in.bits.typ(0), smallInt.zext, smallInt.asSInt)
}
}
res.asUInt
}
when (in.bits.wflags) { // fcvt
// could be improved for RVD/RVQ with a single variable-position rounding
// unit, rather than N fixed-position ones
val i2fResults = for (t <- floatTypes) yield {
val i2f = Module(new hardfloat.INToRecFN(xLen, t.exp, t.sig))
i2f.io.signedIn := ~in.bits.typ(0)
i2f.io.in := intValue
i2f.io.roundingMode := in.bits.rm
i2f.io.detectTininess := hardfloat.consts.tininess_afterRounding
(sanitizeNaN(i2f.io.out, t), i2f.io.exceptionFlags)
}
val (data, exc) = i2fResults.unzip
val dataPadded = data.init.map(d => Cat(data.last >> d.getWidth, d)) :+ data.last
mux.data := dataPadded(tag)
mux.exc := exc(tag)
}
io.out <> Pipe(in.valid, mux, latency-1)
}
class FPToFP(val latency: Int)(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
val io = IO(new Bundle {
val in = Flipped(Valid(new FPInput))
val out = Valid(new FPResult)
val lt = Input(Bool()) // from FPToInt
})
val in = Pipe(io.in)
val signNum = Mux(in.bits.rm(1), in.bits.in1 ^ in.bits.in2, Mux(in.bits.rm(0), ~in.bits.in2, in.bits.in2))
val fsgnj = Cat(signNum(fLen), in.bits.in1(fLen-1, 0))
val fsgnjMux = Wire(new FPResult)
fsgnjMux.exc := 0.U
fsgnjMux.data := fsgnj
when (in.bits.wflags) { // fmin/fmax
val isnan1 = maxType.isNaN(in.bits.in1)
val isnan2 = maxType.isNaN(in.bits.in2)
val isInvalid = maxType.isSNaN(in.bits.in1) || maxType.isSNaN(in.bits.in2)
val isNaNOut = isnan1 && isnan2
val isLHS = isnan2 || in.bits.rm(0) =/= io.lt && !isnan1
fsgnjMux.exc := isInvalid << 4
fsgnjMux.data := Mux(isNaNOut, maxType.qNaN, Mux(isLHS, in.bits.in1, in.bits.in2))
}
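  // fmin/fmax selection sketch: rm(0) is 0 for fmin and 1 for fmax, and io.lt
  // comes from FPToInt's comparator, so fmin picks in1 when in1 < in2, picks the
  // non-NaN operand when exactly one input is NaN, and returns the canonical
  // qNaN only when both inputs are NaN.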
val inTag = in.bits.typeTagIn
val outTag = in.bits.typeTagOut
val mux = WireDefault(fsgnjMux)
for (t <- floatTypes.init) {
when (outTag === typeTag(t).U) {
mux.data := Cat(fsgnjMux.data >> t.recodedWidth, maxType.unsafeConvert(fsgnjMux.data, t))
}
}
when (in.bits.wflags && !in.bits.ren2) { // fcvt
if (floatTypes.size > 1) {
// widening conversions simply canonicalize NaN operands
val widened = Mux(maxType.isNaN(in.bits.in1), maxType.qNaN, in.bits.in1)
fsgnjMux.data := widened
fsgnjMux.exc := maxType.isSNaN(in.bits.in1) << 4
// narrowing conversions require rounding (for RVQ, this could be
// optimized to use a single variable-position rounding unit, rather
// than two fixed-position ones)
for (outType <- floatTypes.init) when (outTag === typeTag(outType).U && ((typeTag(outType) == 0).B || outTag < inTag)) {
val narrower = Module(new hardfloat.RecFNToRecFN(maxType.exp, maxType.sig, outType.exp, outType.sig))
narrower.io.in := in.bits.in1
narrower.io.roundingMode := in.bits.rm
narrower.io.detectTininess := hardfloat.consts.tininess_afterRounding
val narrowed = sanitizeNaN(narrower.io.out, outType)
mux.data := Cat(fsgnjMux.data >> narrowed.getWidth, narrowed)
mux.exc := narrower.io.exceptionFlags
}
}
}
io.out <> Pipe(in.valid, mux, latency-1)
}
class MulAddRecFNPipe(latency: Int, expWidth: Int, sigWidth: Int) extends Module
{
override def desiredName = s"MulAddRecFNPipe_l${latency}_e${expWidth}_s${sigWidth}"
require(latency<=2)
val io = IO(new Bundle {
val validin = Input(Bool())
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
val validout = Output(Bool())
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val mulAddRecFNToRaw_preMul = Module(new hardfloat.MulAddRecFNToRaw_preMul(expWidth, sigWidth))
val mulAddRecFNToRaw_postMul = Module(new hardfloat.MulAddRecFNToRaw_postMul(expWidth, sigWidth))
mulAddRecFNToRaw_preMul.io.op := io.op
mulAddRecFNToRaw_preMul.io.a := io.a
mulAddRecFNToRaw_preMul.io.b := io.b
mulAddRecFNToRaw_preMul.io.c := io.c
val mulAddResult =
(mulAddRecFNToRaw_preMul.io.mulAddA *
mulAddRecFNToRaw_preMul.io.mulAddB) +&
mulAddRecFNToRaw_preMul.io.mulAddC
val valid_stage0 = Wire(Bool())
val roundingMode_stage0 = Wire(UInt(3.W))
val detectTininess_stage0 = Wire(UInt(1.W))
    val postmul_regs = if(latency>0) 1 else 0 // one pipeline register between the multiply and the post-multiply logic when latency >= 1
mulAddRecFNToRaw_postMul.io.fromPreMul := Pipe(io.validin, mulAddRecFNToRaw_preMul.io.toPostMul, postmul_regs).bits
mulAddRecFNToRaw_postMul.io.mulAddResult := Pipe(io.validin, mulAddResult, postmul_regs).bits
mulAddRecFNToRaw_postMul.io.roundingMode := Pipe(io.validin, io.roundingMode, postmul_regs).bits
roundingMode_stage0 := Pipe(io.validin, io.roundingMode, postmul_regs).bits
detectTininess_stage0 := Pipe(io.validin, io.detectTininess, postmul_regs).bits
valid_stage0 := Pipe(io.validin, false.B, postmul_regs).valid
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundRawFNToRecFN = Module(new hardfloat.RoundRawFNToRecFN(expWidth, sigWidth, 0))
    val round_regs = if(latency==2) 1 else 0 // a second pipeline register before final rounding only when latency == 2
roundRawFNToRecFN.io.invalidExc := Pipe(valid_stage0, mulAddRecFNToRaw_postMul.io.invalidExc, round_regs).bits
roundRawFNToRecFN.io.in := Pipe(valid_stage0, mulAddRecFNToRaw_postMul.io.rawOut, round_regs).bits
roundRawFNToRecFN.io.roundingMode := Pipe(valid_stage0, roundingMode_stage0, round_regs).bits
roundRawFNToRecFN.io.detectTininess := Pipe(valid_stage0, detectTininess_stage0, round_regs).bits
io.validout := Pipe(valid_stage0, false.B, round_regs).valid
roundRawFNToRecFN.io.infiniteExc := false.B
io.out := roundRawFNToRecFN.io.out
io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
}
class FPUFMAPipe(val latency: Int, val t: FType)
(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
override def desiredName = s"FPUFMAPipe_l${latency}_f${t.ieeeWidth}"
require(latency>0)
val io = IO(new Bundle {
val in = Flipped(Valid(new FPInput))
val out = Valid(new FPResult)
})
val valid = RegNext(io.in.valid)
val in = Reg(new FPInput)
when (io.in.valid) {
    val one = 1.U << (t.sig + t.exp - 1)                                    // recoded 1.0, the multiplicand for add/sub
    val zero = (io.in.bits.in1 ^ io.in.bits.in2) & (1.U << (t.sig + t.exp)) // recoded 0.0 with sign = sign(in1) ^ sign(in2), the addend for multiply
    val cmd_fma = io.in.bits.ren3                                           // three register sources: a true fused multiply-add
    val cmd_addsub = io.in.bits.swap23                                      // add/sub is computed as in1 * 1.0 +/- in3
    in := io.in.bits
    when (cmd_addsub) { in.in2 := one }
    when (!(cmd_fma || cmd_addsub)) { in.in3 := zero }                      // plain multiply adds a correctly-signed zero
}
val fma = Module(new MulAddRecFNPipe((latency-1) min 2, t.exp, t.sig))
fma.io.validin := valid
fma.io.op := in.fmaCmd
fma.io.roundingMode := in.rm
fma.io.detectTininess := hardfloat.consts.tininess_afterRounding
fma.io.a := in.in1
fma.io.b := in.in2
fma.io.c := in.in3
val res = Wire(new FPResult)
res.data := sanitizeNaN(fma.io.out, t)
res.exc := fma.io.exceptionFlags
io.out := Pipe(fma.io.validout, res, (latency-3) max 0)
}
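// A typical instantiation (a sketch; the actual set of FMA pipes is built in the
// FPU datapath below and depends on the configuration) would look like:
//
//   val sfma = Module(new FPUFMAPipe(cfg.sfmaLatency, FType.S))
//   val dfma = Module(new FPUFMAPipe(cfg.dfmaLatency, FType.D))
//
// i.e. one FMA pipeline per supported format, each with its own configured latency.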
class FPU(cfg: FPUParams)(implicit p: Parameters) extends FPUModule()(p) {
val io = IO(new FPUIO)
val (useClockGating, useDebugROB) = coreParams match {
case r: RocketCoreParams =>
val sz = if (r.debugROB.isDefined) r.debugROB.get.size else 1
(r.clockGate, sz < 1)
case _ => (false, false)
}
val clock_en_reg = Reg(Bool())
val clock_en = clock_en_reg || io.cp_req.valid
val gated_clock =
if (!useClockGating) clock
else ClockGate(clock, clock_en, "fpu_clock_gate")
val fp_decoder = Module(new FPUDecoder)
fp_decoder.io.inst := io.inst
val id_ctrl = WireInit(fp_decoder.io.sigs)
coreParams match { case r: RocketCoreParams => r.vector.map(v => {
val v_decode = v.decoder(p) // Only need to get ren1
v_decode.io.inst := io.inst
v_decode.io.vconfig := DontCare // core deals with this
when (v_decode.io.legal && v_decode.io.read_frs1) {
id_ctrl.ren1 := true.B
id_ctrl.swap12 := false.B
id_ctrl.toint := true.B
id_ctrl.typeTagIn := I
id_ctrl.typeTagOut := Mux(io.v_sew === 3.U, D, S)
}
when (v_decode.io.write_frd) { id_ctrl.wen := true.B }
})}
val ex_reg_valid = RegNext(io.valid, false.B)
val ex_reg_inst = RegEnable(io.inst, io.valid)
val ex_reg_ctrl = RegEnable(id_ctrl, io.valid)
val ex_ra = List.fill(3)(Reg(UInt()))
// load/vector response
val load_wb = RegNext(io.ll_resp_val)
val load_wb_typeTag = RegEnable(io.ll_resp_type(1,0) - typeTagWbOffset, io.ll_resp_val)
val load_wb_data = RegEnable(io.ll_resp_data, io.ll_resp_val)
val load_wb_tag = RegEnable(io.ll_resp_tag, io.ll_resp_val)
class FPUImpl { // entering gated-clock domain
val req_valid = ex_reg_valid || io.cp_req.valid
val ex_cp_valid = io.cp_req.fire
val mem_cp_valid = RegNext(ex_cp_valid, false.B)
val wb_cp_valid = RegNext(mem_cp_valid, false.B)
val mem_reg_valid = RegInit(false.B)
val killm = (io.killm || io.nack_mem) && !mem_cp_valid
// Kill X-stage instruction if M-stage is killed. This prevents it from
// speculatively being sent to the div-sqrt unit, which can cause priority
// inversion for two back-to-back divides, the first of which is killed.
val killx = io.killx || mem_reg_valid && killm
mem_reg_valid := ex_reg_valid && !killx || ex_cp_valid
val mem_reg_inst = RegEnable(ex_reg_inst, ex_reg_valid)
val wb_reg_valid = RegNext(mem_reg_valid && (!killm || mem_cp_valid), false.B)
val cp_ctrl = Wire(new FPUCtrlSigs)
cp_ctrl :<>= io.cp_req.bits.viewAsSupertype(new FPUCtrlSigs)
io.cp_resp.valid := false.B
io.cp_resp.bits.data := 0.U
io.cp_resp.bits.exc := DontCare
val ex_ctrl = Mux(ex_cp_valid, cp_ctrl, ex_reg_ctrl)
val mem_ctrl = RegEnable(ex_ctrl, req_valid)
val wb_ctrl = RegEnable(mem_ctrl, mem_reg_valid)
// CoreMonitorBundle to monitor fp register file writes
val frfWriteBundle = Seq.fill(2)(WireInit(new CoreMonitorBundle(xLen, fLen), DontCare))
frfWriteBundle.foreach { i =>
i.clock := clock
i.reset := reset
i.hartid := io.hartid
i.timer := io.time(31,0)
i.valid := false.B
i.wrenx := false.B
i.wrenf := false.B
i.excpt := false.B
}
// regfile
val regfile = Mem(32, Bits((fLen+1).W))
when (load_wb) {
val wdata = recode(load_wb_data, load_wb_typeTag)
regfile(load_wb_tag) := wdata
assert(consistent(wdata))
if (enableCommitLog)
printf("f%d p%d 0x%x\n", load_wb_tag, load_wb_tag + 32.U, ieee(wdata))
if (useDebugROB)
DebugROB.pushWb(clock, reset, io.hartid, load_wb, load_wb_tag + 32.U, ieee(wdata))
frfWriteBundle(0).wrdst := load_wb_tag
frfWriteBundle(0).wrenf := true.B
frfWriteBundle(0).wrdata := ieee(wdata)
}
val ex_rs = ex_ra.map(a => regfile(a))
when (io.valid) {
when (id_ctrl.ren1) {
when (!id_ctrl.swap12) { ex_ra(0) := io.inst(19,15) }
when (id_ctrl.swap12) { ex_ra(1) := io.inst(19,15) }
}
when (id_ctrl.ren2) {
when (id_ctrl.swap12) { ex_ra(0) := io.inst(24,20) }
when (id_ctrl.swap23) { ex_ra(2) := io.inst(24,20) }
when (!id_ctrl.swap12 && !id_ctrl.swap23) { ex_ra(1) := io.inst(24,20) }
}
when (id_ctrl.ren3) { ex_ra(2) := io.inst(31,27) }
}
val ex_rm = Mux(ex_reg_inst(14,12) === 7.U, io.fcsr_rm, ex_reg_inst(14,12))
def fuInput(minT: Option[FType]): FPInput = {
val req = Wire(new FPInput)
val tag = ex_ctrl.typeTagIn
req.viewAsSupertype(new Bundle with HasFPUCtrlSigs) :#= ex_ctrl.viewAsSupertype(new Bundle with HasFPUCtrlSigs)
req.rm := ex_rm
req.in1 := unbox(ex_rs(0), tag, minT)
req.in2 := unbox(ex_rs(1), tag, minT)
req.in3 := unbox(ex_rs(2), tag, minT)
req.typ := ex_reg_inst(21,20)
req.fmt := ex_reg_inst(26,25)
req.fmaCmd := ex_reg_inst(3,2) | (!ex_ctrl.ren3 && ex_reg_inst(27))
when (ex_cp_valid) {
req := io.cp_req.bits
when (io.cp_req.bits.swap12) {
req.in1 := io.cp_req.bits.in2
req.in2 := io.cp_req.bits.in1
}
when (io.cp_req.bits.swap23) {
req.in2 := io.cp_req.bits.in3
req.in3 := io.cp_req.bits.in2
}
}
req
}
val sfma = Module(new FPUFMAPipe(cfg.sfmaLatency, FType.S))
sfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === S
sfma.io.in.bits := fuInput(Some(sfma.t))
val fpiu = Module(new FPToInt)
fpiu.io.in.valid := req_valid && (ex_ctrl.toint || ex_ctrl.div || ex_ctrl.sqrt || (ex_ctrl.fastpipe && ex_ctrl.wflags))
fpiu.io.in.bits := fuInput(None)
io.store_data := fpiu.io.out.bits.store
io.toint_data := fpiu.io.out.bits.toint
when(fpiu.io.out.valid && mem_cp_valid && mem_ctrl.toint){
io.cp_resp.bits.data := fpiu.io.out.bits.toint
io.cp_resp.valid := true.B
}
val ifpu = Module(new IntToFP(cfg.ifpuLatency))
ifpu.io.in.valid := req_valid && ex_ctrl.fromint
ifpu.io.in.bits := fpiu.io.in.bits
ifpu.io.in.bits.in1 := Mux(ex_cp_valid, io.cp_req.bits.in1, io.fromint_data)
val fpmu = Module(new FPToFP(cfg.fpmuLatency))
fpmu.io.in.valid := req_valid && ex_ctrl.fastpipe
fpmu.io.in.bits := fpiu.io.in.bits
fpmu.io.lt := fpiu.io.out.bits.lt
val divSqrt_wen = WireDefault(false.B)
val divSqrt_inFlight = WireDefault(false.B)
val divSqrt_waddr = Reg(UInt(5.W))
val divSqrt_cp = Reg(Bool())
val divSqrt_typeTag = Wire(UInt(log2Up(floatTypes.size).W))
val divSqrt_wdata = Wire(UInt((fLen+1).W))
val divSqrt_flags = Wire(UInt(FPConstants.FLAGS_SZ.W))
divSqrt_typeTag := DontCare
divSqrt_wdata := DontCare
divSqrt_flags := DontCare
// writeback arbitration
case class Pipe(p: Module, lat: Int, cond: (FPUCtrlSigs) => Bool, res: FPResult)
val pipes = List(
Pipe(fpmu, fpmu.latency, (c: FPUCtrlSigs) => c.fastpipe, fpmu.io.out.bits),
Pipe(ifpu, ifpu.latency, (c: FPUCtrlSigs) => c.fromint, ifpu.io.out.bits),
Pipe(sfma, sfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === S, sfma.io.out.bits)) ++
(fLen > 32).option({
val dfma = Module(new FPUFMAPipe(cfg.dfmaLatency, FType.D))
dfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === D
dfma.io.in.bits := fuInput(Some(dfma.t))
Pipe(dfma, dfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === D, dfma.io.out.bits)
}) ++
(minFLen == 16).option({
val hfma = Module(new FPUFMAPipe(cfg.sfmaLatency, FType.H))
hfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === H
hfma.io.in.bits := fuInput(Some(hfma.t))
Pipe(hfma, hfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === H, hfma.io.out.bits)
})
def latencyMask(c: FPUCtrlSigs, offset: Int) = {
require(pipes.forall(_.lat >= offset))
pipes.map(p => Mux(p.cond(c), (1 << p.lat-offset).U, 0.U)).reduce(_|_)
}
def pipeid(c: FPUCtrlSigs) = pipes.zipWithIndex.map(p => Mux(p._1.cond(c), p._2.U, 0.U)).reduce(_|_)
val maxLatency = pipes.map(_.lat).max
val memLatencyMask = latencyMask(mem_ctrl, 2)
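// Worked example (illustrative, an FMA pipe latency of 4 is assumed): for that pipe
// latencyMask(mem_ctrl, 2) sets bit (4 - 2) = 2, which is OR-ed into the 'wen'
// shift register at the end of the MEM stage; two shifts later the bit reaches
// position 0 and triggers the register-file write in the same cycle the 4-cycle FMA
// result becomes valid.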
class WBInfo extends Bundle {
val rd = UInt(5.W)
val typeTag = UInt(log2Up(floatTypes.size).W)
val cp = Bool()
val pipeid = UInt(log2Ceil(pipes.size).W)
}
val wen = RegInit(0.U((maxLatency-1).W))
val wbInfo = Reg(Vec(maxLatency-1, new WBInfo))
val mem_wen = mem_reg_valid && (mem_ctrl.fma || mem_ctrl.fastpipe || mem_ctrl.fromint)
val write_port_busy = RegEnable(mem_wen && (memLatencyMask & latencyMask(ex_ctrl, 1)).orR || (wen & latencyMask(ex_ctrl, 0)).orR, req_valid)
ccover(mem_reg_valid && write_port_busy, "WB_STRUCTURAL", "structural hazard on writeback")
for (i <- 0 until maxLatency-2) {
when (wen(i+1)) { wbInfo(i) := wbInfo(i+1) }
}
wen := wen >> 1
when (mem_wen) {
when (!killm) {
wen := wen >> 1 | memLatencyMask
}
for (i <- 0 until maxLatency-1) {
when (!write_port_busy && memLatencyMask(i)) {
wbInfo(i).cp := mem_cp_valid
wbInfo(i).typeTag := mem_ctrl.typeTagOut
wbInfo(i).pipeid := pipeid(mem_ctrl)
wbInfo(i).rd := mem_reg_inst(11,7)
}
}
}
val waddr = Mux(divSqrt_wen, divSqrt_waddr, wbInfo(0).rd)
val wb_cp = Mux(divSqrt_wen, divSqrt_cp, wbInfo(0).cp)
val wtypeTag = Mux(divSqrt_wen, divSqrt_typeTag, wbInfo(0).typeTag)
val wdata = box(Mux(divSqrt_wen, divSqrt_wdata, (pipes.map(_.res.data): Seq[UInt])(wbInfo(0).pipeid)), wtypeTag)
val wexc = (pipes.map(_.res.exc): Seq[UInt])(wbInfo(0).pipeid)
when ((!wbInfo(0).cp && wen(0)) || divSqrt_wen) {
assert(consistent(wdata))
regfile(waddr) := wdata
if (enableCommitLog) {
printf("f%d p%d 0x%x\n", waddr, waddr + 32.U, ieee(wdata))
}
frfWriteBundle(1).wrdst := waddr
frfWriteBundle(1).wrenf := true.B
frfWriteBundle(1).wrdata := ieee(wdata)
}
if (useDebugROB) {
DebugROB.pushWb(clock, reset, io.hartid, (!wbInfo(0).cp && wen(0)) || divSqrt_wen, waddr + 32.U, ieee(wdata))
}
when (wb_cp && (wen(0) || divSqrt_wen)) {
io.cp_resp.bits.data := wdata
io.cp_resp.valid := true.B
}
assert(!io.cp_req.valid || pipes.forall(_.lat == pipes.head.lat).B,
s"FPU only supports coprocessor if FMA pipes have uniform latency ${pipes.map(_.lat)}")
// Avoid structural hazards and nacking of external requests
// toint responds in the MEM stage, so an incoming toint can induce a structural hazard against inflight FMAs
io.cp_req.ready := !ex_reg_valid && !(cp_ctrl.toint && wen =/= 0.U) && !divSqrt_inFlight
val wb_toint_valid = wb_reg_valid && wb_ctrl.toint
val wb_toint_exc = RegEnable(fpiu.io.out.bits.exc, mem_ctrl.toint)
io.fcsr_flags.valid := wb_toint_valid || divSqrt_wen || wen(0)
io.fcsr_flags.bits :=
Mux(wb_toint_valid, wb_toint_exc, 0.U) |
Mux(divSqrt_wen, divSqrt_flags, 0.U) |
Mux(wen(0), wexc, 0.U)
val divSqrt_write_port_busy = (mem_ctrl.div || mem_ctrl.sqrt) && wen.orR
io.fcsr_rdy := !(ex_reg_valid && ex_ctrl.wflags || mem_reg_valid && mem_ctrl.wflags || wb_reg_valid && wb_ctrl.toint || wen.orR || divSqrt_inFlight)
io.nack_mem := (write_port_busy || divSqrt_write_port_busy || divSqrt_inFlight) && !mem_cp_valid
io.dec <> id_ctrl
def useScoreboard(f: ((Pipe, Int)) => Bool) = pipes.zipWithIndex.filter(_._1.lat > 3).map(x => f(x)).fold(false.B)(_||_)
io.sboard_set := wb_reg_valid && !wb_cp_valid && RegNext(useScoreboard(_._1.cond(mem_ctrl)) || mem_ctrl.div || mem_ctrl.sqrt || mem_ctrl.vec)
io.sboard_clr := !wb_cp_valid && (divSqrt_wen || (wen(0) && useScoreboard(x => wbInfo(0).pipeid === x._2.U)))
io.sboard_clra := waddr
ccover(io.sboard_clr && load_wb, "DUAL_WRITEBACK", "load and FMA writeback on same cycle")
// we don't currently support round-max-magnitude (rm=4)
io.illegal_rm := io.inst(14,12).isOneOf(5.U, 6.U) || io.inst(14,12) === 7.U && io.fcsr_rm >= 5.U
if (cfg.divSqrt) {
val divSqrt_inValid = mem_reg_valid && (mem_ctrl.div || mem_ctrl.sqrt) && !divSqrt_inFlight
val divSqrt_killed = RegNext(divSqrt_inValid && killm, true.B)
when (divSqrt_inValid) {
divSqrt_waddr := mem_reg_inst(11,7)
divSqrt_cp := mem_cp_valid
}
ccover(divSqrt_inFlight && divSqrt_killed, "DIV_KILLED", "divide killed after issued to divider")
ccover(divSqrt_inFlight && mem_reg_valid && (mem_ctrl.div || mem_ctrl.sqrt), "DIV_BUSY", "divider structural hazard")
ccover(mem_reg_valid && divSqrt_write_port_busy, "DIV_WB_STRUCTURAL", "structural hazard on division writeback")
for (t <- floatTypes) {
val tag = mem_ctrl.typeTagOut
val divSqrt = withReset(divSqrt_killed) { Module(new hardfloat.DivSqrtRecFN_small(t.exp, t.sig, 0)) }
divSqrt.io.inValid := divSqrt_inValid && tag === typeTag(t).U
divSqrt.io.sqrtOp := mem_ctrl.sqrt
divSqrt.io.a := maxType.unsafeConvert(fpiu.io.out.bits.in.in1, t)
divSqrt.io.b := maxType.unsafeConvert(fpiu.io.out.bits.in.in2, t)
divSqrt.io.roundingMode := fpiu.io.out.bits.in.rm
divSqrt.io.detectTininess := hardfloat.consts.tininess_afterRounding
when (!divSqrt.io.inReady) { divSqrt_inFlight := true.B } // only 1 in flight
when (divSqrt.io.outValid_div || divSqrt.io.outValid_sqrt) {
divSqrt_wen := !divSqrt_killed
divSqrt_wdata := sanitizeNaN(divSqrt.io.out, t)
divSqrt_flags := divSqrt.io.exceptionFlags
divSqrt_typeTag := typeTag(t).U
}
}
when (divSqrt_killed) { divSqrt_inFlight := false.B }
} else {
when (id_ctrl.div || id_ctrl.sqrt) { io.illegal_rm := true.B }
}
// gate the clock
clock_en_reg := !useClockGating.B ||
io.keep_clock_enabled || // chicken bit
io.valid || // ID stage
req_valid || // EX stage
mem_reg_valid || mem_cp_valid || // MEM stage
wb_reg_valid || wb_cp_valid || // WB stage
wen.orR || divSqrt_inFlight || // post-WB stage
io.ll_resp_val // load writeback
} // leaving gated-clock domain
val fpuImpl = withClock (gated_clock) { new FPUImpl }
def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
property.cover(cond, s"FPU_$label", "Core;;" + desc)
}
| module FPUFMAPipe_l4_f64_6( // @[FPU.scala:697:7]
input clock, // @[FPU.scala:697:7]
input reset, // @[FPU.scala:697:7]
input io_in_valid, // @[FPU.scala:702:14]
input io_in_bits_ldst, // @[FPU.scala:702:14]
input io_in_bits_wen, // @[FPU.scala:702:14]
input io_in_bits_ren1, // @[FPU.scala:702:14]
input io_in_bits_ren2, // @[FPU.scala:702:14]
input io_in_bits_ren3, // @[FPU.scala:702:14]
input io_in_bits_swap12, // @[FPU.scala:702:14]
input io_in_bits_swap23, // @[FPU.scala:702:14]
input [1:0] io_in_bits_typeTagIn, // @[FPU.scala:702:14]
input [1:0] io_in_bits_typeTagOut, // @[FPU.scala:702:14]
input io_in_bits_fromint, // @[FPU.scala:702:14]
input io_in_bits_toint, // @[FPU.scala:702:14]
input io_in_bits_fastpipe, // @[FPU.scala:702:14]
input io_in_bits_fma, // @[FPU.scala:702:14]
input io_in_bits_div, // @[FPU.scala:702:14]
input io_in_bits_sqrt, // @[FPU.scala:702:14]
input io_in_bits_wflags, // @[FPU.scala:702:14]
input io_in_bits_vec, // @[FPU.scala:702:14]
input [2:0] io_in_bits_rm, // @[FPU.scala:702:14]
input [1:0] io_in_bits_fmaCmd, // @[FPU.scala:702:14]
input [1:0] io_in_bits_typ, // @[FPU.scala:702:14]
input [1:0] io_in_bits_fmt, // @[FPU.scala:702:14]
input [64:0] io_in_bits_in1, // @[FPU.scala:702:14]
input [64:0] io_in_bits_in2, // @[FPU.scala:702:14]
input [64:0] io_in_bits_in3, // @[FPU.scala:702:14]
output [64:0] io_out_bits_data, // @[FPU.scala:702:14]
output [4:0] io_out_bits_exc // @[FPU.scala:702:14]
);
wire [64:0] _fma_io_out; // @[FPU.scala:719:19]
wire _fma_io_validout; // @[FPU.scala:719:19]
wire io_in_valid_0 = io_in_valid; // @[FPU.scala:697:7]
wire io_in_bits_ldst_0 = io_in_bits_ldst; // @[FPU.scala:697:7]
wire io_in_bits_wen_0 = io_in_bits_wen; // @[FPU.scala:697:7]
wire io_in_bits_ren1_0 = io_in_bits_ren1; // @[FPU.scala:697:7]
wire io_in_bits_ren2_0 = io_in_bits_ren2; // @[FPU.scala:697:7]
wire io_in_bits_ren3_0 = io_in_bits_ren3; // @[FPU.scala:697:7]
wire io_in_bits_swap12_0 = io_in_bits_swap12; // @[FPU.scala:697:7]
wire io_in_bits_swap23_0 = io_in_bits_swap23; // @[FPU.scala:697:7]
wire [1:0] io_in_bits_typeTagIn_0 = io_in_bits_typeTagIn; // @[FPU.scala:697:7]
wire [1:0] io_in_bits_typeTagOut_0 = io_in_bits_typeTagOut; // @[FPU.scala:697:7]
wire io_in_bits_fromint_0 = io_in_bits_fromint; // @[FPU.scala:697:7]
wire io_in_bits_toint_0 = io_in_bits_toint; // @[FPU.scala:697:7]
wire io_in_bits_fastpipe_0 = io_in_bits_fastpipe; // @[FPU.scala:697:7]
wire io_in_bits_fma_0 = io_in_bits_fma; // @[FPU.scala:697:7]
wire io_in_bits_div_0 = io_in_bits_div; // @[FPU.scala:697:7]
wire io_in_bits_sqrt_0 = io_in_bits_sqrt; // @[FPU.scala:697:7]
wire io_in_bits_wflags_0 = io_in_bits_wflags; // @[FPU.scala:697:7]
wire io_in_bits_vec_0 = io_in_bits_vec; // @[FPU.scala:697:7]
wire [2:0] io_in_bits_rm_0 = io_in_bits_rm; // @[FPU.scala:697:7]
wire [1:0] io_in_bits_fmaCmd_0 = io_in_bits_fmaCmd; // @[FPU.scala:697:7]
wire [1:0] io_in_bits_typ_0 = io_in_bits_typ; // @[FPU.scala:697:7]
wire [1:0] io_in_bits_fmt_0 = io_in_bits_fmt; // @[FPU.scala:697:7]
wire [64:0] io_in_bits_in1_0 = io_in_bits_in1; // @[FPU.scala:697:7]
wire [64:0] io_in_bits_in2_0 = io_in_bits_in2; // @[FPU.scala:697:7]
wire [64:0] io_in_bits_in3_0 = io_in_bits_in3; // @[FPU.scala:697:7]
wire [63:0] one = 64'h8000000000000000; // @[FPU.scala:710:19]
wire [64:0] _zero_T_1 = 65'h10000000000000000; // @[FPU.scala:711:57]
wire [64:0] _res_data_maskedNaN_T = 65'h1EFEFFFFFFFFFFFFF; // @[FPU.scala:413:27]
wire io_out_pipe_out_valid; // @[Valid.scala:135:21]
wire [64:0] io_out_pipe_out_bits_data; // @[Valid.scala:135:21]
wire [4:0] io_out_pipe_out_bits_exc; // @[Valid.scala:135:21]
wire [64:0] io_out_bits_data_0; // @[FPU.scala:697:7]
wire [4:0] io_out_bits_exc_0; // @[FPU.scala:697:7]
wire io_out_valid; // @[FPU.scala:697:7]
reg valid; // @[FPU.scala:707:22]
reg in_ldst; // @[FPU.scala:708:15]
reg in_wen; // @[FPU.scala:708:15]
reg in_ren1; // @[FPU.scala:708:15]
reg in_ren2; // @[FPU.scala:708:15]
reg in_ren3; // @[FPU.scala:708:15]
reg in_swap12; // @[FPU.scala:708:15]
reg in_swap23; // @[FPU.scala:708:15]
reg [1:0] in_typeTagIn; // @[FPU.scala:708:15]
reg [1:0] in_typeTagOut; // @[FPU.scala:708:15]
reg in_fromint; // @[FPU.scala:708:15]
reg in_toint; // @[FPU.scala:708:15]
reg in_fastpipe; // @[FPU.scala:708:15]
reg in_fma; // @[FPU.scala:708:15]
reg in_div; // @[FPU.scala:708:15]
reg in_sqrt; // @[FPU.scala:708:15]
reg in_wflags; // @[FPU.scala:708:15]
reg in_vec; // @[FPU.scala:708:15]
reg [2:0] in_rm; // @[FPU.scala:708:15]
reg [1:0] in_fmaCmd; // @[FPU.scala:708:15]
reg [1:0] in_typ; // @[FPU.scala:708:15]
reg [1:0] in_fmt; // @[FPU.scala:708:15]
reg [64:0] in_in1; // @[FPU.scala:708:15]
reg [64:0] in_in2; // @[FPU.scala:708:15]
reg [64:0] in_in3; // @[FPU.scala:708:15]
wire [64:0] _zero_T = io_in_bits_in1_0 ^ io_in_bits_in2_0; // @[FPU.scala:697:7, :711:32]
wire [64:0] zero = _zero_T & 65'h10000000000000000; // @[FPU.scala:711:{32,50}]
wire [64:0] _res_data_T_2; // @[FPU.scala:414:10]
wire [64:0] res_data; // @[FPU.scala:728:17]
wire [4:0] res_exc; // @[FPU.scala:728:17]
wire [64:0] res_data_maskedNaN = _fma_io_out & 65'h1EFEFFFFFFFFFFFFF; // @[FPU.scala:413:25, :719:19]
wire [2:0] _res_data_T = _fma_io_out[63:61]; // @[FPU.scala:249:25, :719:19]
wire _res_data_T_1 = &_res_data_T; // @[FPU.scala:249:{25,56}]
assign _res_data_T_2 = _res_data_T_1 ? res_data_maskedNaN : _fma_io_out; // @[FPU.scala:249:56, :413:25, :414:10, :719:19]
assign res_data = _res_data_T_2; // @[FPU.scala:414:10, :728:17]
reg io_out_pipe_v; // @[Valid.scala:141:24]
assign io_out_pipe_out_valid = io_out_pipe_v; // @[Valid.scala:135:21, :141:24]
reg [64:0] io_out_pipe_b_data; // @[Valid.scala:142:26]
assign io_out_pipe_out_bits_data = io_out_pipe_b_data; // @[Valid.scala:135:21, :142:26]
reg [4:0] io_out_pipe_b_exc; // @[Valid.scala:142:26]
assign io_out_pipe_out_bits_exc = io_out_pipe_b_exc; // @[Valid.scala:135:21, :142:26]
assign io_out_valid = io_out_pipe_out_valid; // @[Valid.scala:135:21]
assign io_out_bits_data_0 = io_out_pipe_out_bits_data; // @[Valid.scala:135:21]
assign io_out_bits_exc_0 = io_out_pipe_out_bits_exc; // @[Valid.scala:135:21]
always @(posedge clock) begin // @[FPU.scala:697:7]
valid <= io_in_valid_0; // @[FPU.scala:697:7, :707:22]
if (io_in_valid_0) begin // @[FPU.scala:697:7]
in_ldst <= io_in_bits_ldst_0; // @[FPU.scala:697:7, :708:15]
in_wen <= io_in_bits_wen_0; // @[FPU.scala:697:7, :708:15]
in_ren1 <= io_in_bits_ren1_0; // @[FPU.scala:697:7, :708:15]
in_ren2 <= io_in_bits_ren2_0; // @[FPU.scala:697:7, :708:15]
in_ren3 <= io_in_bits_ren3_0; // @[FPU.scala:697:7, :708:15]
in_swap12 <= io_in_bits_swap12_0; // @[FPU.scala:697:7, :708:15]
in_swap23 <= io_in_bits_swap23_0; // @[FPU.scala:697:7, :708:15]
in_typeTagIn <= io_in_bits_typeTagIn_0; // @[FPU.scala:697:7, :708:15]
in_typeTagOut <= io_in_bits_typeTagOut_0; // @[FPU.scala:697:7, :708:15]
in_fromint <= io_in_bits_fromint_0; // @[FPU.scala:697:7, :708:15]
in_toint <= io_in_bits_toint_0; // @[FPU.scala:697:7, :708:15]
in_fastpipe <= io_in_bits_fastpipe_0; // @[FPU.scala:697:7, :708:15]
in_fma <= io_in_bits_fma_0; // @[FPU.scala:697:7, :708:15]
in_div <= io_in_bits_div_0; // @[FPU.scala:697:7, :708:15]
in_sqrt <= io_in_bits_sqrt_0; // @[FPU.scala:697:7, :708:15]
in_wflags <= io_in_bits_wflags_0; // @[FPU.scala:697:7, :708:15]
in_vec <= io_in_bits_vec_0; // @[FPU.scala:697:7, :708:15]
in_rm <= io_in_bits_rm_0; // @[FPU.scala:697:7, :708:15]
in_fmaCmd <= io_in_bits_fmaCmd_0; // @[FPU.scala:697:7, :708:15]
in_typ <= io_in_bits_typ_0; // @[FPU.scala:697:7, :708:15]
in_fmt <= io_in_bits_fmt_0; // @[FPU.scala:697:7, :708:15]
in_in1 <= io_in_bits_in1_0; // @[FPU.scala:697:7, :708:15]
in_in2 <= io_in_bits_swap23_0 ? 65'h8000000000000000 : io_in_bits_in2_0; // @[FPU.scala:697:7, :708:15, :714:8, :715:{23,32}]
in_in3 <= io_in_bits_ren3_0 | io_in_bits_swap23_0 ? io_in_bits_in3_0 : zero; // @[FPU.scala:697:7, :708:15, :711:50, :714:8, :716:{21,37,46}]
end
if (_fma_io_validout) begin // @[FPU.scala:719:19]
io_out_pipe_b_data <= res_data; // @[Valid.scala:142:26]
io_out_pipe_b_exc <= res_exc; // @[Valid.scala:142:26]
end
if (reset) // @[FPU.scala:697:7]
io_out_pipe_v <= 1'h0; // @[Valid.scala:141:24]
else // @[FPU.scala:697:7]
io_out_pipe_v <= _fma_io_validout; // @[Valid.scala:141:24]
end // @[FPU.scala:697:7]
MulAddRecFNPipe_l2_e11_s53_6 fma ( // @[FPU.scala:719:19]
.clock (clock),
.reset (reset),
.io_validin (valid), // @[FPU.scala:707:22]
.io_op (in_fmaCmd), // @[FPU.scala:708:15]
.io_a (in_in1), // @[FPU.scala:708:15]
.io_b (in_in2), // @[FPU.scala:708:15]
.io_c (in_in3), // @[FPU.scala:708:15]
.io_roundingMode (in_rm), // @[FPU.scala:708:15]
.io_out (_fma_io_out),
.io_exceptionFlags (res_exc),
.io_validout (_fma_io_validout)
); // @[FPU.scala:719:19]
assign io_out_bits_data = io_out_bits_data_0; // @[FPU.scala:697:7]
assign io_out_bits_exc = io_out_bits_exc_0; // @[FPU.scala:697:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File UnsafeAXI4ToTL.scala:
package ara
import chisel3._
import chisel3.util._
import freechips.rocketchip.amba._
import freechips.rocketchip.amba.axi4._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
class ReorderData(val dataWidth: Int, val respWidth: Int, val userFields: Seq[BundleFieldBase]) extends Bundle {
val data = UInt(dataWidth.W)
val resp = UInt(respWidth.W)
val last = Bool()
val user = BundleMap(userFields)
}
/** Parameters for [[BaseReservableListBuffer]] and all child classes.
*
* @param numEntries Total number of elements that can be stored in the 'data' RAM
* @param numLists Maximum number of linked lists
* @param numBeats Maximum number of beats per entry
*/
case class ReservableListBufferParameters(numEntries: Int, numLists: Int, numBeats: Int) {
// Avoid zero-width wires when we call 'log2Ceil'
val entryBits = if (numEntries == 1) 1 else log2Ceil(numEntries)
val listBits = if (numLists == 1) 1 else log2Ceil(numLists)
val beatBits = if (numBeats == 1) 1 else log2Ceil(numBeats)
}
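// Worked example (illustrative parameter values, not from a real configuration):
// ReservableListBufferParameters(numEntries = 32, numLists = 16, numBeats = 8)
// yields entryBits = 5, listBits = 4 and beatBits = 3. The single-element special
// cases ensure that a buffer with one entry, one list, or one beat still gets a
// 1-bit index instead of a zero-width wire.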
case class UnsafeAXI4ToTLNode(numTlTxns: Int, wcorrupt: Boolean)(implicit valName: ValName)
extends MixedAdapterNode(AXI4Imp, TLImp)(
dFn = { case mp =>
TLMasterPortParameters.v2(
masters = mp.masters.zipWithIndex.map { case (m, i) =>
// Support 'numTlTxns' read requests and 'numTlTxns' write requests at once.
val numSourceIds = numTlTxns * 2
TLMasterParameters.v2(
name = m.name,
sourceId = IdRange(i * numSourceIds, (i + 1) * numSourceIds),
nodePath = m.nodePath
)
},
echoFields = mp.echoFields,
requestFields = AMBAProtField() +: mp.requestFields,
responseKeys = mp.responseKeys
)
},
uFn = { mp =>
AXI4SlavePortParameters(
slaves = mp.managers.map { m =>
val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits))
AXI4SlaveParameters(
address = m.address,
resources = m.resources,
regionType = m.regionType,
executable = m.executable,
nodePath = m.nodePath,
supportsWrite = m.supportsPutPartial.intersect(maxXfer),
supportsRead = m.supportsGet.intersect(maxXfer),
interleavedId = Some(0) // TL2 never interleaves D beats
)
},
beatBytes = mp.beatBytes,
minLatency = mp.minLatency,
responseFields = mp.responseFields,
requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt)
)
}
)
class UnsafeAXI4ToTL(numTlTxns: Int, wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule {
require(numTlTxns >= 1)
require(isPow2(numTlTxns), s"Number of TileLink transactions ($numTlTxns) must be a power of 2")
val node = UnsafeAXI4ToTLNode(numTlTxns, wcorrupt)
lazy val module = new LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
edgeIn.master.masters.foreach { m =>
require(m.aligned, "AXI4ToTL requires aligned requests")
}
val numIds = edgeIn.master.endId
val beatBytes = edgeOut.slave.beatBytes
val maxTransfer = edgeOut.slave.maxTransfer
val maxBeats = maxTransfer / beatBytes
// Look for an Error device to redirect bad requests
val errorDevs = edgeOut.slave.managers.filter(_.nodePath.last.lazyModule.className == "TLError")
require(!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. One must be instantiated.")
val errorDev = errorDevs.maxBy(_.maxTransfer)
val errorDevAddr = errorDev.address.head.base
require(
errorDev.supportsPutPartial.contains(maxTransfer),
s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support $maxTransfer"
)
require(
errorDev.supportsGet.contains(maxTransfer),
s"Error device supports ${errorDev.supportsGet} Get but must support $maxTransfer"
)
// All of the read-response reordering logic.
val listBufData = new ReorderData(beatBytes * 8, edgeIn.bundle.respBits, out.d.bits.user.fields)
val listBufParams = ReservableListBufferParameters(numTlTxns, numIds, maxBeats)
val listBuffer = if (numTlTxns > 1) {
Module(new ReservableListBuffer(listBufData, listBufParams))
} else {
Module(new PassthroughListBuffer(listBufData, listBufParams))
}
// To differentiate between read and write transaction IDs, we will set the MSB of the TileLink 'source' field to
// 0 for read requests and 1 for write requests.
val isReadSourceBit = 0.U(1.W)
val isWriteSourceBit = 1.U(1.W)
/* Read request logic */
val rOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val rBytes1 = in.ar.bits.bytes1()
val rSize = OH1ToUInt(rBytes1)
val rOk = edgeOut.slave.supportsGetSafe(in.ar.bits.addr, rSize)
val rId = if (numTlTxns > 1) {
Cat(isReadSourceBit, listBuffer.ioReservedIndex)
} else {
isReadSourceBit
}
val rAddr = Mux(rOk, in.ar.bits.addr, errorDevAddr.U | in.ar.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Indicates if there are still valid TileLink source IDs left to use.
val canIssueR = listBuffer.ioReserve.ready
listBuffer.ioReserve.bits := in.ar.bits.id
listBuffer.ioReserve.valid := in.ar.valid && rOut.ready
in.ar.ready := rOut.ready && canIssueR
rOut.valid := in.ar.valid && canIssueR
rOut.bits :<= edgeOut.Get(rId, rAddr, rSize)._2
rOut.bits.user :<= in.ar.bits.user
rOut.bits.user.lift(AMBAProt).foreach { rProt =>
rProt.privileged := in.ar.bits.prot(0)
rProt.secure := !in.ar.bits.prot(1)
rProt.fetch := in.ar.bits.prot(2)
rProt.bufferable := in.ar.bits.cache(0)
rProt.modifiable := in.ar.bits.cache(1)
rProt.readalloc := in.ar.bits.cache(2)
rProt.writealloc := in.ar.bits.cache(3)
}
/* Write request logic */
// Strip off the MSB, which identifies the transaction as read vs write.
val strippedResponseSourceId = if (numTlTxns > 1) {
out.d.bits.source((out.d.bits.source).getWidth - 2, 0)
} else {
// When there's only 1 TileLink transaction allowed for read/write, then this field is always 0.
0.U(1.W)
}
// Track when a write request burst is in progress.
val writeBurstBusy = RegInit(false.B)
when(in.w.fire) {
writeBurstBusy := !in.w.bits.last
}
val usedWriteIds = RegInit(0.U(numTlTxns.W))
val canIssueW = !usedWriteIds.andR
val usedWriteIdsSet = WireDefault(0.U(numTlTxns.W))
val usedWriteIdsClr = WireDefault(0.U(numTlTxns.W))
usedWriteIds := (usedWriteIds & ~usedWriteIdsClr) | usedWriteIdsSet
// Since write responses can show up in the middle of a write burst, we need to ensure the write burst ID doesn't
// change mid-burst.
val freeWriteIdOHRaw = Wire(UInt(numTlTxns.W))
val freeWriteIdOH = freeWriteIdOHRaw holdUnless !writeBurstBusy
val freeWriteIdIndex = OHToUInt(freeWriteIdOH)
freeWriteIdOHRaw := ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds
val wOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val wBytes1 = in.aw.bits.bytes1()
val wSize = OH1ToUInt(wBytes1)
val wOk = edgeOut.slave.supportsPutPartialSafe(in.aw.bits.addr, wSize)
val wId = if (numTlTxns > 1) {
Cat(isWriteSourceBit, freeWriteIdIndex)
} else {
isWriteSourceBit
}
val wAddr = Mux(wOk, in.aw.bits.addr, errorDevAddr.U | in.aw.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Here, we're taking advantage of the Irrevocable behavior of AXI4 (once 'valid' is asserted it must remain
// asserted until the handshake occurs). We will only accept W-channel beats when we have a valid AW beat, but
// the AW-channel beat won't fire until the final W-channel beat fires. So, we have stable address/size/strb
// bits during a W-channel burst.
in.aw.ready := wOut.ready && in.w.valid && in.w.bits.last && canIssueW
in.w.ready := wOut.ready && in.aw.valid && canIssueW
wOut.valid := in.aw.valid && in.w.valid && canIssueW
wOut.bits :<= edgeOut.Put(wId, wAddr, wSize, in.w.bits.data, in.w.bits.strb)._2
in.w.bits.user.lift(AMBACorrupt).foreach { wOut.bits.corrupt := _ }
wOut.bits.user :<= in.aw.bits.user
wOut.bits.user.lift(AMBAProt).foreach { wProt =>
wProt.privileged := in.aw.bits.prot(0)
wProt.secure := !in.aw.bits.prot(1)
wProt.fetch := in.aw.bits.prot(2)
wProt.bufferable := in.aw.bits.cache(0)
wProt.modifiable := in.aw.bits.cache(1)
wProt.readalloc := in.aw.bits.cache(2)
wProt.writealloc := in.aw.bits.cache(3)
}
// Merge the AXI4 read/write requests into the TL-A channel.
TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, rOut), (in.aw.bits.len, wOut))
/* Read/write response logic */
val okB = Wire(Irrevocable(new AXI4BundleB(edgeIn.bundle)))
val okR = Wire(Irrevocable(new AXI4BundleR(edgeIn.bundle)))
val dResp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY)
val dHasData = edgeOut.hasData(out.d.bits)
val (_dFirst, dLast, _dDone, dCount) = edgeOut.count(out.d)
val dNumBeats1 = edgeOut.numBeats1(out.d.bits)
// Handle cases where writeack arrives before write is done
val writeEarlyAck = (UIntToOH(strippedResponseSourceId) & usedWriteIds) === 0.U
out.d.ready := Mux(dHasData, listBuffer.ioResponse.ready, okB.ready && !writeEarlyAck)
listBuffer.ioDataOut.ready := okR.ready
okR.valid := listBuffer.ioDataOut.valid
okB.valid := out.d.valid && !dHasData && !writeEarlyAck
listBuffer.ioResponse.valid := out.d.valid && dHasData
listBuffer.ioResponse.bits.index := strippedResponseSourceId
listBuffer.ioResponse.bits.data.data := out.d.bits.data
listBuffer.ioResponse.bits.data.resp := dResp
listBuffer.ioResponse.bits.data.last := dLast
listBuffer.ioResponse.bits.data.user :<= out.d.bits.user
listBuffer.ioResponse.bits.count := dCount
listBuffer.ioResponse.bits.numBeats1 := dNumBeats1
okR.bits.id := listBuffer.ioDataOut.bits.listIndex
okR.bits.data := listBuffer.ioDataOut.bits.payload.data
okR.bits.resp := listBuffer.ioDataOut.bits.payload.resp
okR.bits.last := listBuffer.ioDataOut.bits.payload.last
okR.bits.user :<= listBuffer.ioDataOut.bits.payload.user
// Upon the final beat in a write request, record a mapping from TileLink source ID to AXI write ID. Upon a write
// response, mark the write transaction as complete.
val writeIdMap = Mem(numTlTxns, UInt(log2Ceil(numIds).W))
val writeResponseId = writeIdMap.read(strippedResponseSourceId)
when(wOut.fire) {
writeIdMap.write(freeWriteIdIndex, in.aw.bits.id)
}
when(edgeOut.done(wOut)) {
usedWriteIdsSet := freeWriteIdOH
}
when(okB.fire) {
usedWriteIdsClr := UIntToOH(strippedResponseSourceId, numTlTxns)
}
okB.bits.id := writeResponseId
okB.bits.resp := dResp
okB.bits.user :<= out.d.bits.user
// AXI4 needs irrevocable behaviour
in.r <> Queue.irrevocable(okR, 1, flow = true)
in.b <> Queue.irrevocable(okB, 1, flow = true)
// Unused channels
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
/* Alignment constraints. The AXI4Fragmenter should guarantee all of these constraints. */
def checkRequest[T <: AXI4BundleA](a: IrrevocableIO[T], reqType: String): Unit = {
val lReqType = reqType.toLowerCase
when(a.valid) {
assert(a.bits.len < maxBeats.U, s"$reqType burst length (%d) must be less than $maxBeats", a.bits.len + 1.U)
// Narrow transfers and FIXED bursts must be single-beat bursts.
when(a.bits.len =/= 0.U) {
assert(
a.bits.size === log2Ceil(beatBytes).U,
s"Narrow $lReqType transfers (%d < $beatBytes bytes) can't be multi-beat bursts (%d beats)",
1.U << a.bits.size,
a.bits.len + 1.U
)
assert(
a.bits.burst =/= AXI4Parameters.BURST_FIXED,
s"Fixed $lReqType bursts can't be multi-beat bursts (%d beats)",
a.bits.len + 1.U
)
}
// Furthermore, the transfer size (a.bits.bytes1() + 1.U) must be naturally-aligned to the address (in
// particular, during both WRAP and INCR bursts), but this constraint is already checked by TileLink
// Monitors. Note that this alignment requirement means that WRAP bursts are identical to INCR bursts.
}
}
checkRequest(in.ar, "Read")
checkRequest(in.aw, "Write")
}
}
}
object UnsafeAXI4ToTL {
def apply(numTlTxns: Int = 1, wcorrupt: Boolean = true)(implicit p: Parameters) = {
val axi42tl = LazyModule(new UnsafeAXI4ToTL(numTlTxns, wcorrupt))
axi42tl.node
}
}
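// Hypothetical usage sketch (node names and the transaction count are assumptions,
// not taken from this file): in a diplomacy graph the adapter sits between an AXI4
// master and a TileLink client port, with an AXI4Fragmenter upstream to satisfy the
// alignment requirement checked above, e.g.
//
//   tlXbar.node := UnsafeAXI4ToTL(numTlTxns = 4) := AXI4Fragmenter() := axiMaster.node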
/* ReservableListBuffer logic, and associated classes. */
class ResponsePayload[T <: Data](val data: T, val params: ReservableListBufferParameters) extends Bundle {
val index = UInt(params.entryBits.W)
val count = UInt(params.beatBits.W)
val numBeats1 = UInt(params.beatBits.W)
}
class DataOutPayload[T <: Data](val payload: T, val params: ReservableListBufferParameters) extends Bundle {
val listIndex = UInt(params.listBits.W)
}
/** Abstract base class to unify [[ReservableListBuffer]] and [[PassthroughListBuffer]]. */
abstract class BaseReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends Module {
require(params.numEntries > 0)
require(params.numLists > 0)
val ioReserve = IO(Flipped(Decoupled(UInt(params.listBits.W))))
val ioReservedIndex = IO(Output(UInt(params.entryBits.W)))
val ioResponse = IO(Flipped(Decoupled(new ResponsePayload(gen, params))))
val ioDataOut = IO(Decoupled(new DataOutPayload(gen, params)))
}
/** A modified version of 'ListBuffer' from 'sifive/block-inclusivecache-sifive'. This module forces users to reserve
* linked list entries (through the 'ioReserve' port) before writing data into those linked lists (through the
* 'ioResponse' port). Each response is tagged to indicate which linked list it is written into. The responses for a
* given linked list can come back out-of-order, but they will be read out through the 'ioDataOut' port in-order.
*
* ==Constructor==
* @param gen Chisel type of linked list data element
* @param params Other parameters
*
* ==Module IO==
* @param ioReserve Index of list to reserve a new element in
* @param ioReservedIndex Index of the entry that was reserved in the linked list, valid when 'ioReserve.fire'
* @param ioResponse Payload containing response data and linked-list-entry index
* @param ioDataOut Payload containing data read from response linked list and linked list index
*/
class ReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
val valid = RegInit(0.U(params.numLists.W))
val head = Mem(params.numLists, UInt(params.entryBits.W))
val tail = Mem(params.numLists, UInt(params.entryBits.W))
val used = RegInit(0.U(params.numEntries.W))
val next = Mem(params.numEntries, UInt(params.entryBits.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val dataMems = Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) }
val dataIsPresent = RegInit(0.U(params.numEntries.W))
val beats = Mem(params.numEntries, UInt(params.beatBits.W))
// The 'data' SRAM should be single-ported (read-or-write), since dual-ported SRAMs are significantly slower.
val dataMemReadEnable = WireDefault(false.B)
val dataMemWriteEnable = WireDefault(false.B)
assert(!(dataMemReadEnable && dataMemWriteEnable))
// 'freeOH' has a single bit set, which is the least-significant bit that is cleared in 'used'. So, it's the
// lowest-index entry in the 'data' RAM which is free.
val freeOH = Wire(UInt(params.numEntries.W))
val freeIndex = OHToUInt(freeOH)
freeOH := ~(leftOR(~used) << 1) & ~used
ioReservedIndex := freeIndex
val validSet = WireDefault(0.U(params.numLists.W))
val validClr = WireDefault(0.U(params.numLists.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
val dataIsPresentSet = WireDefault(0.U(params.numEntries.W))
val dataIsPresentClr = WireDefault(0.U(params.numEntries.W))
valid := (valid & ~validClr) | validSet
used := (used & ~usedClr) | usedSet
dataIsPresent := (dataIsPresent & ~dataIsPresentClr) | dataIsPresentSet
/* Reservation logic signals */
val reserveTail = Wire(UInt(params.entryBits.W))
val reserveIsValid = Wire(Bool())
/* Response logic signals */
val responseIndex = Wire(UInt(params.entryBits.W))
val responseListIndex = Wire(UInt(params.listBits.W))
val responseHead = Wire(UInt(params.entryBits.W))
val responseTail = Wire(UInt(params.entryBits.W))
val nextResponseHead = Wire(UInt(params.entryBits.W))
val nextDataIsPresent = Wire(Bool())
val isResponseInOrder = Wire(Bool())
val isEndOfList = Wire(Bool())
val isLastBeat = Wire(Bool())
val isLastResponseBeat = Wire(Bool())
val isLastUnwindBeat = Wire(Bool())
/* Reservation logic */
reserveTail := tail.read(ioReserve.bits)
reserveIsValid := valid(ioReserve.bits)
ioReserve.ready := !used.andR
// When we want to append-to and destroy the same linked list on the same cycle, we need to take special care that we
// actually start a new list, rather than appending to a list that's about to disappear.
val reserveResponseSameList = ioReserve.bits === responseListIndex
val appendToAndDestroyList =
ioReserve.fire && ioDataOut.fire && reserveResponseSameList && isEndOfList && isLastBeat
when(ioReserve.fire) {
validSet := UIntToOH(ioReserve.bits, params.numLists)
usedSet := freeOH
when(reserveIsValid && !appendToAndDestroyList) {
next.write(reserveTail, freeIndex)
}.otherwise {
head.write(ioReserve.bits, freeIndex)
}
tail.write(ioReserve.bits, freeIndex)
map.write(freeIndex, ioReserve.bits)
}
/* Response logic */
// The majority of the response logic (reading from and writing to the various RAMs) is common between the
// response-from-IO case (ioResponse.fire) and the response-from-unwind case (unwindDataIsValid).
// The read from the 'next' RAM should be performed at the address given by 'responseHead'. However, we only use the
// 'nextResponseHead' signal when 'isResponseInOrder' is asserted (both in the response-from-IO and
// response-from-unwind cases), which implies that 'responseHead' equals 'responseIndex'. 'responseHead' comes after
// two back-to-back RAM reads, so indexing into the 'next' RAM with 'responseIndex' is much quicker.
responseHead := head.read(responseListIndex)
responseTail := tail.read(responseListIndex)
nextResponseHead := next.read(responseIndex)
nextDataIsPresent := dataIsPresent(nextResponseHead)
// Note that when 'isEndOfList' is asserted, 'nextResponseHead' (and therefore 'nextDataIsPresent') is invalid, since
// there isn't a next element in the linked list.
isResponseInOrder := responseHead === responseIndex
isEndOfList := responseHead === responseTail
isLastResponseBeat := ioResponse.bits.count === ioResponse.bits.numBeats1
// When a response's last beat is sent to the output channel, mark it as completed. This can happen in two
// situations:
// 1. We receive an in-order response, which travels straight from 'ioResponse' to 'ioDataOut'. The 'data' SRAM
// reservation was never needed.
// 2. An entry is read out of the 'data' SRAM (within the unwind FSM).
when(ioDataOut.fire && isLastBeat) {
// Mark the reservation as no-longer-used.
usedClr := UIntToOH(responseIndex, params.numEntries)
// If the response is in-order, then we're popping an element from this linked list.
when(isEndOfList) {
// Once we pop the last element from a linked list, mark it as no-longer-present.
validClr := UIntToOH(responseListIndex, params.numLists)
}.otherwise {
// Move the linked list's head pointer to the new head pointer.
head.write(responseListIndex, nextResponseHead)
}
}
// If we get an out-of-order response, then stash it in the 'data' SRAM for later unwinding.
when(ioResponse.fire && !isResponseInOrder) {
dataMemWriteEnable := true.B
when(isLastResponseBeat) {
dataIsPresentSet := UIntToOH(ioResponse.bits.index, params.numEntries)
beats.write(ioResponse.bits.index, ioResponse.bits.numBeats1)
}
}
// Use the 'ioResponse.bits.count' index (AKA the beat number) to select which 'data' SRAM to write to.
val responseCountOH = UIntToOH(ioResponse.bits.count, params.numBeats)
(responseCountOH.asBools zip dataMems) foreach { case (select, seqMem) =>
when(select && dataMemWriteEnable) {
seqMem.write(ioResponse.bits.index, ioResponse.bits.data)
}
}
/* Response unwind logic */
// Unwind FSM state definitions
val sIdle :: sUnwinding :: Nil = Enum(2)
val unwindState = RegInit(sIdle)
val busyUnwinding = unwindState === sUnwinding
val startUnwind = Wire(Bool())
val stopUnwind = Wire(Bool())
when(startUnwind) {
unwindState := sUnwinding
}.elsewhen(stopUnwind) {
unwindState := sIdle
}
assert(!(startUnwind && stopUnwind))
// Start the unwind FSM when there is an old out-of-order response stored in the 'data' SRAM that is now about to
// become the next in-order response. As noted previously, when 'isEndOfList' is asserted, 'nextDataIsPresent' is
// invalid.
//
// Note that since an in-order response from 'ioResponse' to 'ioDataOut' starts the unwind FSM, we don't have to
// worry about overwriting the 'data' SRAM's output when we start the unwind FSM.
startUnwind := ioResponse.fire && isResponseInOrder && isLastResponseBeat && !isEndOfList && nextDataIsPresent
// Stop the unwind FSM when the output channel consumes the final beat of an element from the unwind FSM, and one of
// two things happens:
// 1. We're still waiting for the next in-order response for this list (!nextDataIsPresent)
// 2. There are no more outstanding responses in this list (isEndOfList)
//
// Including 'busyUnwinding' ensures this is a single-cycle pulse, and it never fires while in-order transactions are
// passing from 'ioResponse' to 'ioDataOut'.
stopUnwind := busyUnwinding && ioDataOut.fire && isLastUnwindBeat && (!nextDataIsPresent || isEndOfList)
val isUnwindBurstOver = Wire(Bool())
val startNewBurst = startUnwind || (isUnwindBurstOver && dataMemReadEnable)
// Track the number of beats left to unwind for each list entry. At the start of a new burst, we flop the number of
// beats in this burst (minus 1) into 'unwindBeats1', and we reset the 'beatCounter' counter. With each beat, we
// increment 'beatCounter' until it reaches 'unwindBeats1'.
val unwindBeats1 = Reg(UInt(params.beatBits.W))
val nextBeatCounter = Wire(UInt(params.beatBits.W))
val beatCounter = RegNext(nextBeatCounter)
isUnwindBurstOver := beatCounter === unwindBeats1
when(startNewBurst) {
unwindBeats1 := beats.read(nextResponseHead)
nextBeatCounter := 0.U
}.elsewhen(dataMemReadEnable) {
nextBeatCounter := beatCounter + 1.U
}.otherwise {
nextBeatCounter := beatCounter
}
// When unwinding, feed the next linked-list head pointer (read out of the 'next' RAM) back so we can unwind the next
// entry in this linked list. Only update the pointer when we're actually moving to the next 'data' SRAM entry (which
// happens at the start of reading a new stored burst).
val unwindResponseIndex = RegEnable(nextResponseHead, startNewBurst)
responseIndex := Mux(busyUnwinding, unwindResponseIndex, ioResponse.bits.index)
// Hold 'nextResponseHead' static while we're in the middle of unwinding a multi-beat burst entry. We don't want the
// SRAM read address to shift while reading beats from a burst. Note that this is identical to 'nextResponseHead
// holdUnless startNewBurst', but 'unwindResponseIndex' already implements the 'RegEnable' signal in 'holdUnless'.
val unwindReadAddress = Mux(startNewBurst, nextResponseHead, unwindResponseIndex)
// The 'data' SRAM's output is valid if we read from the SRAM on the previous cycle. The SRAM's output stays valid
// until it is consumed by the output channel (and if we don't read from the SRAM again on that same cycle).
val unwindDataIsValid = RegInit(false.B)
when(dataMemReadEnable) {
unwindDataIsValid := true.B
}.elsewhen(ioDataOut.fire) {
unwindDataIsValid := false.B
}
isLastUnwindBeat := isUnwindBurstOver && unwindDataIsValid
// Indicates if this is the last beat for both 'ioResponse'-to-'ioDataOut' and unwind-to-'ioDataOut' beats.
isLastBeat := Mux(busyUnwinding, isLastUnwindBeat, isLastResponseBeat)
// Select which SRAM to read from based on the beat counter.
val dataOutputVec = Wire(Vec(params.numBeats, gen))
val nextBeatCounterOH = UIntToOH(nextBeatCounter, params.numBeats)
(nextBeatCounterOH.asBools zip dataMems).zipWithIndex foreach { case ((select, seqMem), i) =>
dataOutputVec(i) := seqMem.read(unwindReadAddress, select && dataMemReadEnable)
}
// Select the current 'data' SRAM output beat, and save the output in a register in case we're being back-pressured
// by 'ioDataOut'. This implements the functionality of 'readAndHold', but only on the single SRAM we're reading
// from.
val dataOutput = dataOutputVec(beatCounter) holdUnless RegNext(dataMemReadEnable)
// Mark 'data' burst entries as no-longer-present as they get read out of the SRAM.
when(dataMemReadEnable) {
dataIsPresentClr := UIntToOH(unwindReadAddress, params.numEntries)
}
// As noted above, when starting the unwind FSM, we know the 'data' SRAM's output isn't valid, so it's safe to issue
// a read command. Otherwise, only issue an SRAM read when the next 'unwindState' is 'sUnwinding', and if we know
// we're not going to overwrite the SRAM's current output (the SRAM output is already valid, and it's not going to be
// consumed by the output channel).
val dontReadFromDataMem = unwindDataIsValid && !ioDataOut.ready
dataMemReadEnable := startUnwind || (busyUnwinding && !stopUnwind && !dontReadFromDataMem)
// While unwinding, prevent new reservations from overwriting the current 'map' entry that we're using. We need
// 'responseListIndex' to be coherent for the entire unwind process.
val rawResponseListIndex = map.read(responseIndex)
val unwindResponseListIndex = RegEnable(rawResponseListIndex, startNewBurst)
responseListIndex := Mux(busyUnwinding, unwindResponseListIndex, rawResponseListIndex)
// Accept responses either when they can be passed through to the output channel, or if they're out-of-order and are
// just going to be stashed in the 'data' SRAM. Never accept a response payload when we're busy unwinding, since that
// could result in reading from and writing to the 'data' SRAM in the same cycle, and we want that SRAM to be
// single-ported.
ioResponse.ready := (ioDataOut.ready || !isResponseInOrder) && !busyUnwinding
// Either pass an in-order response to the output channel, or data read from the unwind FSM.
ioDataOut.valid := Mux(busyUnwinding, unwindDataIsValid, ioResponse.valid && isResponseInOrder)
ioDataOut.bits.listIndex := responseListIndex
ioDataOut.bits.payload := Mux(busyUnwinding, dataOutput, ioResponse.bits.data)
// It's an error to get a response that isn't associated with a valid linked list.
when(ioResponse.fire || unwindDataIsValid) {
assert(
valid(responseListIndex),
"No linked list exists at index %d, mapped from %d",
responseListIndex,
responseIndex
)
}
when(busyUnwinding && dataMemReadEnable) {
assert(isResponseInOrder, "Unwind FSM must read entries from SRAM in order")
}
}
/** Specialized version of [[ReservableListBuffer]] for the case of numEntries == 1.
*
* Much of the complex logic in [[ReservableListBuffer]] can disappear in this case. For instance, we don't have to
* reorder any responses, or store any linked lists.
*/
class PassthroughListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
require(params.numEntries == 1, s"PassthroughListBuffer is only valid when 'numEntries' (${params.numEntries}) is 1")
val used = RegInit(0.U(params.numEntries.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
used := (used & ~usedClr) | usedSet
ioReserve.ready := used === 0.U
// Store which list index was reserved, we need to return this value when we get a response.
when(ioReserve.fire) {
usedSet := 1.U
map.write(0.U, ioReserve.bits)
}
// There's only one valid linked list entry, which is at index 0.
ioReservedIndex := 0.U
val isLastResponseBeat = ioResponse.bits.count === ioResponse.bits.numBeats1
// Mark the linked list as empty when we get the last beat in a response.
// Note that 'ioResponse.fire === ioDataOut.fire'.
when(ioResponse.fire && isLastResponseBeat) {
usedClr := 1.U
}
// Always pass the response data straight through, since we never need to reorder the response data.
ioDataOut.bits.listIndex := map.read(0.U)
ioDataOut.bits.payload := ioResponse.bits.data
ioDataOut.valid := ioResponse.valid
ioResponse.ready := ioDataOut.ready
}
| module dataMems_458( // @[UnsafeAXI4ToTL.scala:365:62]
input [4:0] R0_addr,
input R0_en,
input R0_clk,
output [66:0] R0_data,
input [4:0] W0_addr,
input W0_en,
input W0_clk,
input [66:0] W0_data
);
dataMems_0_ext dataMems_0_ext ( // @[UnsafeAXI4ToTL.scala:365:62]
.R0_addr (R0_addr),
.R0_en (R0_en),
.R0_clk (R0_clk),
.R0_data (R0_data),
.W0_addr (W0_addr),
.W0_en (W0_en),
.W0_clk (W0_clk),
.W0_data (W0_data)
); // @[UnsafeAXI4ToTL.scala:365:62]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File RecFNToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import consts._
class
RecFNToRecFN(
inExpWidth: Int, inSigWidth: Int, outExpWidth: Int, outSigWidth: Int)
extends chisel3.RawModule
{
val io = IO(new Bundle {
val in = Input(Bits((inExpWidth + inSigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val rawIn = rawFloatFromRecFN(inExpWidth, inSigWidth, io.in);
if ((inExpWidth == outExpWidth) && (inSigWidth <= outSigWidth)) {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
io.out := io.in<<(outSigWidth - inSigWidth)
io.exceptionFlags := isSigNaNRawFloat(rawIn) ## 0.U(4.W)
} else {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
inExpWidth,
inSigWidth,
outExpWidth,
outSigWidth,
flRoundOpt_sigMSBitAlwaysZero
))
roundAnyRawFNToRecFN.io.invalidExc := isSigNaNRawFloat(rawIn)
roundAnyRawFNToRecFN.io.infiniteExc := false.B
roundAnyRawFNToRecFN.io.in := rawIn
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
}
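// Note (illustrative, referring to the instance emitted below): when inExpWidth ==
// outExpWidth and inSigWidth == outSigWidth the first branch is taken with a shift
// of zero, so the module reduces to a pass-through of io.in plus a signaling-NaN
// check feeding the exception flags.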
File rawFloatFromRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
/*----------------------------------------------------------------------------
| In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be
| set.
*----------------------------------------------------------------------------*/
object rawFloatFromRecFN
{
def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat =
{
val exp = in(expWidth + sigWidth - 1, sigWidth - 1)
val isZero = exp(expWidth, expWidth - 2) === 0.U
val isSpecial = exp(expWidth, expWidth - 1) === 3.U
val out = Wire(new RawFloat(expWidth, sigWidth))
out.isNaN := isSpecial && exp(expWidth - 2)
out.isInf := isSpecial && ! exp(expWidth - 2)
out.isZero := isZero
out.sign := in(expWidth + sigWidth)
out.sExp := exp.zext
out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0)
out
}
}
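// Worked example (illustrative, single precision assumed: expWidth = 8, sigWidth = 24):
// 'exp' is the 9-bit recoded exponent in(31, 23). exp(8,6) === 0 marks a zero,
// exp(8,7) === 3 marks a special value, and within the special case exp(6)
// distinguishes a NaN (1) from an infinity (0); all other encodings are finite
// numbers whose fraction bits in(22, 0) pass through unchanged.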
| module RecFNToRecFN_220( // @[RecFNToRecFN.scala:44:5]
input [32:0] io_in, // @[RecFNToRecFN.scala:48:16]
output [32:0] io_out // @[RecFNToRecFN.scala:48:16]
);
wire [32:0] io_in_0 = io_in; // @[RecFNToRecFN.scala:44:5]
wire io_detectTininess = 1'h1; // @[RecFNToRecFN.scala:44:5, :48:16]
wire [2:0] io_roundingMode = 3'h0; // @[RecFNToRecFN.scala:44:5, :48:16]
wire [32:0] _io_out_T = io_in_0; // @[RecFNToRecFN.scala:44:5, :64:35]
wire [4:0] _io_exceptionFlags_T_3; // @[RecFNToRecFN.scala:65:54]
wire [32:0] io_out_0; // @[RecFNToRecFN.scala:44:5]
wire [4:0] io_exceptionFlags; // @[RecFNToRecFN.scala:44:5]
wire [8:0] rawIn_exp = io_in_0[31:23]; // @[rawFloatFromRecFN.scala:51:21]
wire [2:0] _rawIn_isZero_T = rawIn_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28]
wire rawIn_isZero = _rawIn_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}]
wire rawIn_isZero_0 = rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :55:23]
wire [1:0] _rawIn_isSpecial_T = rawIn_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28]
wire rawIn_isSpecial = &_rawIn_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}]
wire _rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33]
wire _rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33]
wire _rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:59:25]
wire [9:0] _rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27]
wire [24:0] _rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44]
wire rawIn_isNaN; // @[rawFloatFromRecFN.scala:55:23]
wire rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire rawIn_sign; // @[rawFloatFromRecFN.scala:55:23]
wire [9:0] rawIn_sExp; // @[rawFloatFromRecFN.scala:55:23]
wire [24:0] rawIn_sig; // @[rawFloatFromRecFN.scala:55:23]
wire _rawIn_out_isNaN_T = rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41]
wire _rawIn_out_isInf_T = rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41]
assign _rawIn_out_isNaN_T_1 = rawIn_isSpecial & _rawIn_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}]
assign rawIn_isNaN = _rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33]
wire _rawIn_out_isInf_T_1 = ~_rawIn_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}]
assign _rawIn_out_isInf_T_2 = rawIn_isSpecial & _rawIn_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}]
assign rawIn_isInf = _rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33]
assign _rawIn_out_sign_T = io_in_0[32]; // @[rawFloatFromRecFN.scala:59:25]
assign rawIn_sign = _rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25]
assign _rawIn_out_sExp_T = {1'h0, rawIn_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27]
assign rawIn_sExp = _rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire _rawIn_out_sig_T = ~rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :61:35]
wire [1:0] _rawIn_out_sig_T_1 = {1'h0, _rawIn_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}]
wire [22:0] _rawIn_out_sig_T_2 = io_in_0[22:0]; // @[rawFloatFromRecFN.scala:61:49]
assign _rawIn_out_sig_T_3 = {_rawIn_out_sig_T_1, _rawIn_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}]
assign rawIn_sig = _rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44]
assign io_out_0 = _io_out_T; // @[RecFNToRecFN.scala:44:5, :64:35]
wire _io_exceptionFlags_T = rawIn_sig[22]; // @[rawFloatFromRecFN.scala:55:23]
wire _io_exceptionFlags_T_1 = ~_io_exceptionFlags_T; // @[common.scala:82:{49,56}]
wire _io_exceptionFlags_T_2 = rawIn_isNaN & _io_exceptionFlags_T_1; // @[rawFloatFromRecFN.scala:55:23]
assign _io_exceptionFlags_T_3 = {_io_exceptionFlags_T_2, 4'h0}; // @[common.scala:82:46]
assign io_exceptionFlags = _io_exceptionFlags_T_3; // @[RecFNToRecFN.scala:44:5, :65:54]
assign io_out = io_out_0; // @[RecFNToRecFN.scala:44:5]
endmodule |
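
In the generated RecFNToRecFN_220 above, the source and destination formats are identical, so io_out is wired straight through from io_in; the only logic that remains is the signaling-NaN detection derived from rawFloatFromRecFN (a NaN whose top fraction bit is clear raises the invalid flag). A minimal sketch of how the decode helper is normally consumed, assuming the hardfloat package shown above is on the classpath (the module name and ports are illustrative, not part of the original sources):

package hardfloat
import chisel3._

class RecFNClassifier extends Module {
  val io = IO(new Bundle {
    val in     = Input(UInt(33.W)) // recoded single precision: 1 sign + 9 exponent + 23 fraction bits
    val isNaN  = Output(Bool())
    val isInf  = Output(Bool())
    val isZero = Output(Bool())
  })
  // Per the comment above apply(), at most one of these three outputs is ever set.
  val raw = rawFloatFromRecFN(8, 24, io.in)
  io.isNaN  := raw.isNaN
  io.isInf  := raw.isInf
  io.isZero := raw.isZero
}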
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
 * @param inputType Data type of the A operand (and of the forwarded out_a)
 * @param outputType Data type of the B and D operands and of the out_b/out_c results
 * @param accType Data type used for accumulation (also sizes the shift field of PEControl)
 * @param df Supported dataflow(s): OS, WS, or BOTH
 * @param max_simultaneous_matmuls Maximum number of matmuls in flight at once (sizes the id field)
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
  def >>(u: UInt): T // This is a rounding shift! Rounds to nearest, ties to even
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
  // Optional operations, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
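        // Worked example (comment only): self = 5 (0b101) with u = 1 gives point_five = 1,
        // zeros = 0, ones_digit = 0, so r = 0 and the result is 2 (2.5 rounds to the even value 2);
        // self = 3 (0b011) with u = 1 gives r = 1 and the result is also 2 (1.5 rounds up to 2).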
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
        // Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
        // Instantiate the hardfloat sqrt unit
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
          // Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
          // Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
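      // Example for >> above (comment only): with an 8-bit exponent field (bias = 127) and u = 3,
      // shift_exp = 124, shift_fn encodes 2^(-3) = 0.125, and the MulRecFN scales self by that constant.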
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module MacUnit_249( // @[PE.scala:14:7]
input clock, // @[PE.scala:14:7]
input reset, // @[PE.scala:14:7]
input [7:0] io_in_a, // @[PE.scala:16:14]
input [7:0] io_in_b, // @[PE.scala:16:14]
input [31:0] io_in_c, // @[PE.scala:16:14]
output [19:0] io_out_d // @[PE.scala:16:14]
);
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:14:7]
wire [7:0] io_in_b_0 = io_in_b; // @[PE.scala:14:7]
wire [31:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7]
wire [19:0] io_out_d_0; // @[PE.scala:14:7]
wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7]
wire [32:0] _io_out_d_T_1 = {{17{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[31], io_in_c_0}; // @[PE.scala:14:7]
wire [31:0] _io_out_d_T_2 = _io_out_d_T_1[31:0]; // @[Arithmetic.scala:93:54]
wire [31:0] _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54]
assign io_out_d_0 = _io_out_d_T_3[19:0]; // @[PE.scala:14:7, :23:12]
assign io_out_d = io_out_d_0; // @[PE.scala:14:7]
endmodule |
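
The port widths of MacUnit_249 follow directly from SIntArithmetic.mac above: the two 8-bit operands form a 16-bit product, adding the 32-bit io_in_c keeps the result at 32 bits, and the final connection to the 20-bit io_out_d truncates. A standalone sketch of the same width bookkeeping (illustrative only; the module name is not part of the gemmini sources):

import chisel3._

class MacWidthSketch extends Module {
  val io = IO(new Bundle {
    val a = Input(SInt(8.W))
    val b = Input(SInt(8.W))
    val c = Input(SInt(32.W))
    val d = Output(SInt(20.W))
  })
  val prod = io.a * io.b  // 8b x 8b multiply widens to a 16-bit product
  val sum  = prod +% io.c // non-widening add: the result keeps the wider operand's 32 bits
  io.d := sum             // the connection truncates the 32-bit sum down to 20 bits
}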
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
 * @param inputType Data type of the A operand (and of the forwarded out_a)
 * @param outputType Data type of the B and D operands and of the out_b/out_c results
 * @param accType Data type used for accumulation (also sizes the shift field of PEControl)
 * @param df Supported dataflow(s): OS, WS, or BOTH
 * @param max_simultaneous_matmuls Maximum number of matmuls in flight at once (sizes the id field)
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
  def >>(u: UInt): T // This is a rounding shift! Rounds to nearest, ties to even
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
  // Optional operations, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
        // Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
        // Instantiate the hardfloat sqrt unit
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
          // Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
          // Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module MacUnit_223( // @[PE.scala:14:7]
input clock, // @[PE.scala:14:7]
input reset, // @[PE.scala:14:7]
input [7:0] io_in_a, // @[PE.scala:16:14]
input [7:0] io_in_b, // @[PE.scala:16:14]
input [19:0] io_in_c, // @[PE.scala:16:14]
output [19:0] io_out_d // @[PE.scala:16:14]
);
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:14:7]
wire [7:0] io_in_b_0 = io_in_b; // @[PE.scala:14:7]
wire [19:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7]
wire [19:0] _io_out_d_T_3; // @[Arithmetic.scala:93:54]
wire [19:0] io_out_d_0; // @[PE.scala:14:7]
wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7]
wire [20:0] _io_out_d_T_1 = {{5{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[19], io_in_c_0}; // @[PE.scala:14:7]
wire [19:0] _io_out_d_T_2 = _io_out_d_T_1[19:0]; // @[Arithmetic.scala:93:54]
assign _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54]
assign io_out_d_0 = _io_out_d_T_3; // @[PE.scala:14:7]
assign io_out_d = io_out_d_0; // @[PE.scala:14:7]
endmodule |
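
MacUnit_223 differs from MacUnit_249 only in the width of io_in_c (20 bits here versus 32 bits earlier). That is the dataflow-dependent cType selection in PE.scala at work (if (df == Dataflow.WS) outputType else accType): a weight-stationary PE hands the MAC a c operand of the output type, while an output-stationary PE hands it the full accumulator type. A sketch of the two instantiations implied by the generated port widths (to be written inside a module; the concrete SInt widths are inferred from the ports above, so treat them as an assumption):

val mac_ws = Module(new MacUnit(SInt(8.W), SInt(20.W), SInt(20.W))) // WS dataflow: c shares outputType (MacUnit_223-style ports)
val mac_os = Module(new MacUnit(SInt(8.W), SInt(32.W), SInt(20.W))) // OS/BOTH dataflow: c is accType (MacUnit_249-style ports)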
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
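// Usage sketch (not part of the original file): inside a module with a Bool din,
//   val din_pipe = ShiftRegInit(din, n = 3, init = false.B, name = Some("din_pipe"))
//   val din_cdc  = AsyncResetShiftReg(din, depth = 3, init = 0, name = Some("din_cdc"))
// both delay din by three cycles; the second builds its chain from AsyncResetRegVec
// instances so that backend flows can recognize and constrain it for CDC use.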
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
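// Worked example of the conversion above (comment only): for bits = 3 the binary sequence
// 0,1,2,3,4,... maps through incremented ^ (incremented >> 1) to the Gray sequence
// 000,001,011,010,110,..., so consecutive pointer values differ in exactly one bit and can
// be synchronized across the clock crossing without risk of sampling a torn multi-bit update.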
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
    // Impossible to write because a dequeue can occur on the receiving side,
    // then reset is allowed to happen, but the write side cannot know that the
    // dequeue occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
// The register only latches when the selected value is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity-check assertion for users
// denoting that reset must not be asserted while there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
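// Editorial usage sketch (not part of the upstream file): how ToAsyncBundle and
// FromAsyncBundle are typically paired. The clock/reset names below are placeholders;
// each helper must be elaborated in the clock domain that owns its half of the queue.
//
//   val crossing = withClockAndReset(enqClock, enqReset) { ToAsyncBundle(producer) }
//   val deqSide  = withClockAndReset(deqClock, deqReset) { FromAsyncBundle(crossing) }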
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
| module AsyncValidSync_151( // @[AsyncQueue.scala:58:7]
input io_in, // @[AsyncQueue.scala:59:14]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_161 io_out_source_valid ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_d (io_in_0), // @[AsyncQueue.scala:58:7]
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
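// Editorial sketch (not part of the upstream file): software models of addWrap and
// subWrap above, for intuition only. Both assume 0 <= x < n and 0 <= y < n, matching
// the stated preconditions; e.g. addWrapModel(4, 5, 6) == 3 and subWrapModel(1, 4, 6) == 3.
object ModularWrapSketch {
  def addWrapModel(x: Int, y: Int, n: Int): Int = { val z = x + y; if (z >= n) z - n else z }
  def subWrapModel(x: Int, y: Int, n: Int): Int = { val z = x - y; if (z < 0) z + n else z }
}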
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
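// Editorial worked example (not part of the upstream file): the "OH1" encoding above is a
// thermometer code of x low-order ones. For instance UIntToOH1(3.U, 8) produces
// "b00000111".U, OH1ToOH of that value produces the one-hot "b00001000".U, and
// OH1ToUInt recovers 3.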
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
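// Editorial sketch (not part of the upstream file): plain-Scala models of the two
// OR-fills above over a fixed width. For width = 5, leftORModel(0x4, 5) == 0x1c
// (0b00100 becomes 0b11100) and rightORModel(0x4, 5) == 0x7 (0b00100 becomes 0b00111).
object ORFillSketch {
  def leftORModel(x: Int, width: Int): Int = {
    var r = x & ((1 << width) - 1)
    var s = 1
    while (s < width) { r |= (r << s) & ((1 << width) - 1); s += s }
    r
  }
  def rightORModel(x: Int, width: Int): Int = {
    var r = x & ((1 << width) - 1)
    var s = 1
    while (s < width) { r |= r >>> s; s += s }
    r
  }
}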
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
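// Editorial usage example (not part of the upstream file): unlike Seq.groupBy, the
// result preserves the order in which keys are first seen, which keeps generated
// hardware deterministic across runs. For example
//   groupByIntoSeq(Seq("aa", "b", "cc", "d"))(_.length)
// returns Seq(2 -> Seq("aa", "cc"), 1 -> Seq("b", "d")).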
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** instantiate this [[LazyModule]], return [[AutoBundle]] and unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
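// Editorial worked example (not part of the upstream file): for IdRange(4, 8) the
// hardware contains(x: UInt) above reduces to a prefix compare. The largest bit that
// differs between 4 (0b100) and 7 (0b111) is bit 1, so smallestCommonBit is 2: the
// check accepts x when (x >> 2) == 1 and the low two bits lie in [0, 3], i.e. exactly
// x in {4, 5, 6, 7}.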
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
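// Editorial worked example (not part of the upstream file):
//   TransferSizes(4, 8).intersect(TransferSizes(64, 128)) == TransferSizes.none
//   TransferSizes(4, 8).mincover (TransferSizes(64, 128)) == TransferSizes(4, 128)
// mincover is deliberately not a union: the result also admits 16 and 32, sizes that
// neither operand supports on its own.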
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
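// Editorial worked example (not part of the upstream file): AddressSet.misaligned
// decomposes an arbitrary base/size region into naturally aligned power-of-two sets:
//   AddressSet.misaligned(0x1000, 0x3000)
// returns Seq(AddressSet(0x1000, 0xfff), AddressSet(0x2000, 0x1fff)), covering
// 0x1000-0x1fff and 0x2000-0x3fff respectively.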
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
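// Editorial usage note (not part of the upstream file): BufferParams.default(x) wraps a
// DecoupledIO in a 2-deep Queue, BufferParams.none passes it through untouched, and the
// flow/pipe variants are 1-deep queues with chisel3.util.Queue's flow (combinational
// enq-to-deq when empty) and pipe (combinational ready when full) options respectively.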
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
* nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all node are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
* Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
* Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
* the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
* Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
* It should extend [[chisel3.Data]], which represents the real hardware.
* @tparam DO
* Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
* describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
* Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
* Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
* interface. It should extend [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
* - line `─`: source is process by a function and generate pass to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
/** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
* connections, which need to be resolved to determine how many actual edges they correspond to. We also need to
* build up the ranges of edges which correspond to each binding operator, so that we can apply the correct edge
* parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator).
* [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort (binding
* operator).
* [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= bar`.
* [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`.
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// resolveStar depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the [[iPortMapping]] of the node on the other side of the
* binding. `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info
* where this connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
// Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: unconnected forwarded diplomatic signals are driven to DontCare as a compatibility workaround;
// in the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: unconnected forwarded diplomatic signals are driven to DontCare as a compatibility workaround;
// in the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
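// Usage sketch (illustrative): inside a [[LazyModuleImp]], the negotiated (bundle, edge) pairs of a TileLink
// client node `node` (an assumption of this sketch, not defined here) can be consumed as follows:
// {{{
//   node.out.foreach { case (bundle, edge) =>
//     val (legal, get) = edge.Get(fromSource = 0.U, toAddress = 0x1000.U, lgSize = 3.U)
//     bundle.a.valid := legal      // gate with the design's own request logic
//     bundle.a.bits  := get
//   }
// }}}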
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/** Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
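// Worked example (illustrative): with maxLgSize >= 2, isAligned(0x14.U, 2.U) computes
// mask = UIntToOH1(2, maxLgSize) = 0x3 and 0x14 & 0x3 === 0, so the access is aligned;
// isAligned(0x15.U, 2.U) is false because 0x15 & 0x3 === 0x1.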
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
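// Worked example (illustrative): with manager.beatBytes = 8, a PutFullData of 32 bytes (lgSize = 5) carries
// data, so numBeats = 32/8 = 4 and numBeats1 = 3; a Get of the same size carries no data on this channel,
// so numBeats = 1 and numBeats1 = 0.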
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
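// Usage sketch (illustrative): counting completed D-channel messages on a negotiated port; `edge` and `tl`
// are assumed to come from `node.in.head` or similar and are not defined here.
// {{{
//   val (d_first, d_last, d_done) = edge.firstlast(tl.d)
//   val responses = RegInit(0.U(32.W))
//   when (d_done) { responses := responses + 1.U }  // d_done pulses on the final beat of each message
// }}}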
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
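// Usage sketch (illustrative, client side): requesting a cache block with write permission on channel A.
// `edge` is this TLEdgeOut as obtained from `node.out`; `tl`, `blockAddress`, and `wantWrite` are
// placeholders of this sketch.
// {{{
//   val (legal, acquire) = edge.AcquireBlock(
//     fromSource      = 0.U,
//     toAddress       = blockAddress,
//     lgSize          = 6.U,                 // a 64-byte block, an assumption of this sketch
//     growPermissions = TLPermissions.NtoT)
//   tl.a.valid := legal && wantWrite
//   tl.a.bits  := acquire
// }}}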
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
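// Usage sketch (illustrative, manager side): answering a Get on channel A with AccessAckData on channel D.
// `edge` is this TLEdgeIn; `tl` and `readData` are placeholders of this sketch.
// {{{
//   tl.d.valid := tl.a.valid && tl.a.bits.opcode === TLMessages.Get
//   tl.a.ready := tl.d.ready
//   tl.d.bits  := edge.AccessAck(tl.a.bits, readData)
// }}}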
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
File Arbiter.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
object TLArbiter
{
// (valids, select) => readys
type Policy = (Integer, UInt, Bool) => UInt
val lowestIndexFirst: Policy = (width, valids, select) => ~(leftOR(valids) << 1)(width-1, 0)
val highestIndexFirst: Policy = (width, valids, select) => ~((rightOR(valids) >> 1).pad(width))
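// Worked example (illustrative): for width = 4 and valids = "b0110".U, leftOR(valids) = "b1110"; shifting
// left by one and truncating gives "b1100", and the complement yields readys = "b0011". ANDed with valids
// this grants only index 1, the lowest valid requester. `select` is ignored by the fixed-priority policies.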
val roundRobin: Policy = (width, valids, select) => if (width == 1) 1.U(1.W) else {
val valid = valids(width-1, 0)
assert (valid === valids)
val mask = RegInit(((BigInt(1) << width)-1).U(width-1,0))
val filter = Cat(valid & ~mask, valid)
val unready = (rightOR(filter, width*2, width) >> 1) | (mask << width)
val readys = ~((unready >> width) & unready(width-1, 0))
when (select && valid.orR) {
mask := leftOR(readys & valid, width)
}
readys(width-1, 0)
}
def lowestFromSeq[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: Seq[DecoupledIO[T]]): Unit = {
apply(lowestIndexFirst)(sink, sources.map(s => (edge.numBeats1(s.bits), s)):_*)
}
def lowest[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = {
apply(lowestIndexFirst)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*)
}
def highest[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = {
apply(highestIndexFirst)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*)
}
def robin[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = {
apply(roundRobin)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*)
}
def apply[T <: Data](policy: Policy)(sink: DecoupledIO[T], sources: (UInt, DecoupledIO[T])*): Unit = {
if (sources.isEmpty) {
sink.bits := DontCare
} else if (sources.size == 1) {
sink :<>= sources.head._2
} else {
val pairs = sources.toList
val beatsIn = pairs.map(_._1)
val sourcesIn = pairs.map(_._2)
// The number of beats which remain to be sent
val beatsLeft = RegInit(0.U)
val idle = beatsLeft === 0.U
val latch = idle && sink.ready // winner (if any) claims sink
// Who wants access to the sink?
val valids = sourcesIn.map(_.valid)
// Arbitrate amongst the requests
val readys = VecInit(policy(valids.size, Cat(valids.reverse), latch).asBools)
// Which request wins arbitration?
val winner = VecInit((readys zip valids) map { case (r,v) => r&&v })
// Confirm the policy works properly
require (readys.size == valids.size)
// Never two winners
val prefixOR = winner.scanLeft(false.B)(_||_).init
assert((prefixOR zip winner) map { case (p,w) => !p || !w } reduce {_ && _})
// If there was any request, there is a winner
assert (!valids.reduce(_||_) || winner.reduce(_||_))
// Track remaining beats
val maskedBeats = (winner zip beatsIn) map { case (w,b) => Mux(w, b, 0.U) }
val initBeats = maskedBeats.reduce(_ | _) // no winner => 0 beats
beatsLeft := Mux(latch, initBeats, beatsLeft - sink.fire)
// The one-hot source granted access in the previous cycle
val state = RegInit(VecInit(Seq.fill(sources.size)(false.B)))
val muxState = Mux(idle, winner, state)
state := muxState
val allowed = Mux(idle, readys, state)
(sourcesIn zip allowed) foreach { case (s, r) =>
s.ready := sink.ready && r
}
sink.valid := Mux(idle, valids.reduce(_||_), Mux1H(state, valids))
sink.bits :<= Mux1H(muxState, sourcesIn.map(_.bits))
}
}
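// Usage sketch (illustrative): apply also works on plain Decoupled payloads. With single-beat sources
// (0 extra beats), round-robin arbitration of two producers `sourceA`/`sourceB` onto one consumer `sink`
// (placeholders of this sketch) looks like:
// {{{
//   TLArbiter(TLArbiter.roundRobin)(sink, (0.U, sourceA), (0.U, sourceB))
// }}}
// For TileLink channels, prefer the lowest/highest/robin helpers above, which derive beat counts from the
// edge via numBeats1.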
}
// Synthesizable unit tests
import freechips.rocketchip.unittest._
abstract class DecoupledArbiterTest(
policy: TLArbiter.Policy,
txns: Int,
timeout: Int,
val numSources: Int,
beatsLeftFromIdx: Int => UInt)
(implicit p: Parameters) extends UnitTest(timeout)
{
val sources = Wire(Vec(numSources, DecoupledIO(UInt(log2Ceil(numSources).W))))
dontTouch(sources.suggestName("sources"))
val sink = Wire(DecoupledIO(UInt(log2Ceil(numSources).W)))
dontTouch(sink.suggestName("sink"))
val count = RegInit(0.U(log2Ceil(txns).W))
val lfsr = LFSR(16, true.B)
sources.zipWithIndex.map { case (z, i) => z.bits := i.U }
TLArbiter(policy)(sink, sources.zipWithIndex.map {
case (z, i) => (beatsLeftFromIdx(i), z)
}:_*)
count := count + 1.U
io.finished := count >= txns.U
}
/** This tests that when a specific pattern of source valids are driven,
* a new index from amongst that pattern is always selected,
* unless one of those sources takes multiple beats,
* in which case the same index should be selected until the arbiter goes idle.
*/
class TLDecoupledArbiterRobinTest(txns: Int = 128, timeout: Int = 500000, print: Boolean = false)
(implicit p: Parameters)
extends DecoupledArbiterTest(TLArbiter.roundRobin, txns, timeout, 6, i => i.U)
{
val lastWinner = RegInit((numSources+1).U)
val beatsLeft = RegInit(0.U(log2Ceil(numSources).W))
val first = lastWinner > numSources.U
val valid = lfsr(0)
val ready = lfsr(15)
sink.ready := ready
sources.zipWithIndex.map { // pattern: every even-indexed valid is driven the same random way
case (s, i) => s.valid := (if (i % 2 == 1) false.B else valid)
}
when (sink.fire) {
if (print) { printf("TestRobin: %d\n", sink.bits) }
when (beatsLeft === 0.U) {
assert(lastWinner =/= sink.bits, "Round robin did not pick a new idx despite one being valid.")
lastWinner := sink.bits
beatsLeft := sink.bits
} .otherwise {
assert(lastWinner === sink.bits, "Round robin did not pick the same index over multiple beats")
beatsLeft := beatsLeft - 1.U
}
}
if (print) {
when (!sink.fire) { printf("TestRobin: idle (%d %d)\n", valid, ready) }
}
}
/** This tests that the lowest index is always selected across random single cycle transactions. */
class TLDecoupledArbiterLowestTest(txns: Int = 128, timeout: Int = 500000)(implicit p: Parameters)
extends DecoupledArbiterTest(TLArbiter.lowestIndexFirst, txns, timeout, 15, _ => 0.U)
{
def assertLowest(id: Int): Unit = {
when (sources(id).valid) {
assert((numSources-1 until id by -1).map(!sources(_).fire).foldLeft(true.B)(_&&_), s"$id was valid but a higher valid source was granted ready.")
}
}
sources.zipWithIndex.map { case (s, i) => s.valid := lfsr(i) }
sink.ready := lfsr(15)
when (sink.fire) { (0 until numSources).foreach(assertLowest(_)) }
}
/** This tests that the highest index is always selected across random single cycle transactions. */
class TLDecoupledArbiterHighestTest(txns: Int = 128, timeout: Int = 500000)(implicit p: Parameters)
extends DecoupledArbiterTest(TLArbiter.highestIndexFirst, txns, timeout, 15, _ => 0.U)
{
def assertHighest(id: Int): Unit = {
when (sources(id).valid) {
assert((0 until id).map(!sources(_).fire).foldLeft(true.B)(_&&_), s"$id was valid but a lower valid source was granted ready.")
}
}
sources.zipWithIndex.map { case (s, i) => s.valid := lfsr(i) }
sink.ready := lfsr(15)
when (sink.fire) { (0 until numSources).foreach(assertHighest(_)) }
}
File Xbar.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressDecoder, AddressSet, RegionType, IdRange, TriStateValue}
import freechips.rocketchip.util.BundleField
// Trades off slave port proximity against routing resource cost
object ForceFanout
{
def apply[T](
a: TriStateValue = TriStateValue.unset,
b: TriStateValue = TriStateValue.unset,
c: TriStateValue = TriStateValue.unset,
d: TriStateValue = TriStateValue.unset,
e: TriStateValue = TriStateValue.unset)(body: Parameters => T)(implicit p: Parameters) =
{
body(p.alterPartial {
case ForceFanoutKey => p(ForceFanoutKey) match {
case ForceFanoutParams(pa, pb, pc, pd, pe) =>
ForceFanoutParams(a.update(pa), b.update(pb), c.update(pc), d.update(pd), e.update(pe))
}
})
}
}
private case class ForceFanoutParams(a: Boolean, b: Boolean, c: Boolean, d: Boolean, e: Boolean)
private case object ForceFanoutKey extends Field(ForceFanoutParams(false, false, false, false, false))
class TLXbar(policy: TLArbiter.Policy = TLArbiter.roundRobin, nameSuffix: Option[String] = None)(implicit p: Parameters) extends LazyModule
{
val node = new TLNexusNode(
clientFn = { seq =>
seq(0).v1copy(
echoFields = BundleField.union(seq.flatMap(_.echoFields)),
requestFields = BundleField.union(seq.flatMap(_.requestFields)),
responseKeys = seq.flatMap(_.responseKeys).distinct,
minLatency = seq.map(_.minLatency).min,
clients = (TLXbar.mapInputIds(seq) zip seq) flatMap { case (range, port) =>
port.clients map { client => client.v1copy(
sourceId = client.sourceId.shift(range.start)
)}
}
)
},
managerFn = { seq =>
val fifoIdFactory = TLXbar.relabeler()
seq(0).v1copy(
responseFields = BundleField.union(seq.flatMap(_.responseFields)),
requestKeys = seq.flatMap(_.requestKeys).distinct,
minLatency = seq.map(_.minLatency).min,
endSinkId = TLXbar.mapOutputIds(seq).map(_.end).max,
managers = seq.flatMap { port =>
require (port.beatBytes == seq(0).beatBytes,
s"Xbar ($name with parent $parent) data widths don't match: ${port.managers.map(_.name)} has ${port.beatBytes}B vs ${seq(0).managers.map(_.name)} has ${seq(0).beatBytes}B")
val fifoIdMapper = fifoIdFactory()
port.managers map { manager => manager.v1copy(
fifoId = manager.fifoId.map(fifoIdMapper(_))
)}
}
)
}
){
override def circuitIdentity = outputs.size == 1 && inputs.size == 1
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
if ((node.in.size * node.out.size) > (8*32)) {
println (s"!!! WARNING !!!")
println (s" Your TLXbar ($name with parent $parent) is very large, with ${node.in.size} Masters and ${node.out.size} Slaves.")
println (s"!!! WARNING !!!")
}
val wide_bundle = TLBundleParameters.union((node.in ++ node.out).map(_._2.bundle))
override def desiredName = (Seq("TLXbar") ++ nameSuffix ++ Seq(s"i${node.in.size}_o${node.out.size}_${wide_bundle.shortName}")).mkString("_")
TLXbar.circuit(policy, node.in, node.out)
}
}
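// Usage sketch (illustrative): a TLXbar is typically instantiated inside a LazyModule and spliced into the
// diplomatic graph with the star binding operators, assuming an implicit Parameters is in scope and
// `clientNode`/`managerNode` are placeholder nodes of this sketch:
// {{{
//   val xbar = LazyModule(new TLXbar())
//   xbar.node :=* clientNode     // one edge per client
//   managerNode :*= xbar.node    // one edge per manager
// }}}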
object TLXbar
{
def mapInputIds(ports: Seq[TLMasterPortParameters]) = assignRanges(ports.map(_.endSourceId))
def mapOutputIds(ports: Seq[TLSlavePortParameters]) = assignRanges(ports.map(_.endSinkId))
def assignRanges(sizes: Seq[Int]) = {
val pow2Sizes = sizes.map { z => if (z == 0) 0 else 1 << log2Ceil(z) }
val tuples = pow2Sizes.zipWithIndex.sortBy(_._1) // record old index, then sort by increasing size
val starts = tuples.scanRight(0)(_._1 + _).tail // suffix-sum of the sizes = the start positions
val ranges = (tuples zip starts) map { case ((sz, i), st) =>
(if (sz == 0) IdRange(0, 0) else IdRange(st, st + sz), i)
}
ranges.sortBy(_._2).map(_._1) // Restore original order
}
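// Worked example (illustrative): assignRanges(Seq(3, 0, 5)) rounds the sizes up to powers of two (4, 0, 8),
// sorts by size, and assigns the largest port the lowest ids, returning Seq(IdRange(8, 12), IdRange(0, 0),
// IdRange(0, 8)) in the original port order: the 8-wide port gets ids [0, 8), the 4-wide port gets [8, 12),
// and the zero-sized port gets the empty range.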
def relabeler() = {
var idFactory = 0
() => {
val fifoMap = scala.collection.mutable.HashMap.empty[Int, Int]
(x: Int) => {
if (fifoMap.contains(x)) fifoMap(x) else {
val out = idFactory
idFactory = idFactory + 1
fifoMap += (x -> out)
out
}
}
}
}
def circuit(policy: TLArbiter.Policy, seqIn: Seq[(TLBundle, TLEdge)], seqOut: Seq[(TLBundle, TLEdge)]): Unit = {
val (io_in, edgesIn) = seqIn.unzip
val (io_out, edgesOut) = seqOut.unzip
// Not every master needs to connect to every slave on every channel; determine which connections are necessary
val reachableIO = edgesIn.map { cp => edgesOut.map { mp =>
cp.client.clients.exists { c => mp.manager.managers.exists { m =>
c.visibility.exists { ca => m.address.exists { ma =>
ca.overlaps(ma)}}}}
}.toVector}.toVector
val probeIO = (edgesIn zip reachableIO).map { case (cp, reachableO) =>
(edgesOut zip reachableO).map { case (mp, reachable) =>
reachable && cp.client.anySupportProbe && mp.manager.managers.exists(_.regionType >= RegionType.TRACKED)
}.toVector}.toVector
val releaseIO = (edgesIn zip reachableIO).map { case (cp, reachableO) =>
(edgesOut zip reachableO).map { case (mp, reachable) =>
reachable && cp.client.anySupportProbe && mp.manager.anySupportAcquireB
}.toVector}.toVector
val connectAIO = reachableIO
val connectBIO = probeIO
val connectCIO = releaseIO
val connectDIO = reachableIO
val connectEIO = releaseIO
def transpose[T](x: Seq[Seq[T]]) = if (x.isEmpty) Nil else Vector.tabulate(x(0).size) { i => Vector.tabulate(x.size) { j => x(j)(i) } }
val connectAOI = transpose(connectAIO)
val connectBOI = transpose(connectBIO)
val connectCOI = transpose(connectCIO)
val connectDOI = transpose(connectDIO)
val connectEOI = transpose(connectEIO)
// Grab the port ID mapping
val inputIdRanges = TLXbar.mapInputIds(edgesIn.map(_.client))
val outputIdRanges = TLXbar.mapOutputIds(edgesOut.map(_.manager))
// We need an intermediate size of bundle with the widest possible identifiers
val wide_bundle = TLBundleParameters.union(io_in.map(_.params) ++ io_out.map(_.params))
// Handle size = 1 gracefully (Chisel3 empty range is broken)
def trim(id: UInt, size: Int): UInt = if (size <= 1) 0.U else id(log2Ceil(size)-1, 0)
// Transform input bundle sources (sinks use global namespace on both sides)
val in = Wire(Vec(io_in.size, TLBundle(wide_bundle)))
for (i <- 0 until in.size) {
val r = inputIdRanges(i)
if (connectAIO(i).exists(x=>x)) {
in(i).a.bits.user := DontCare
in(i).a.squeezeAll.waiveAll :<>= io_in(i).a.squeezeAll.waiveAll
in(i).a.bits.source := io_in(i).a.bits.source | r.start.U
} else {
in(i).a := DontCare
io_in(i).a := DontCare
in(i).a.valid := false.B
io_in(i).a.ready := true.B
}
if (connectBIO(i).exists(x=>x)) {
io_in(i).b.squeezeAll :<>= in(i).b.squeezeAll
io_in(i).b.bits.source := trim(in(i).b.bits.source, r.size)
} else {
in(i).b := DontCare
io_in(i).b := DontCare
in(i).b.ready := true.B
io_in(i).b.valid := false.B
}
if (connectCIO(i).exists(x=>x)) {
in(i).c.bits.user := DontCare
in(i).c.squeezeAll.waiveAll :<>= io_in(i).c.squeezeAll.waiveAll
in(i).c.bits.source := io_in(i).c.bits.source | r.start.U
} else {
in(i).c := DontCare
io_in(i).c := DontCare
in(i).c.valid := false.B
io_in(i).c.ready := true.B
}
if (connectDIO(i).exists(x=>x)) {
io_in(i).d.squeezeAll.waiveAll :<>= in(i).d.squeezeAll.waiveAll
io_in(i).d.bits.source := trim(in(i).d.bits.source, r.size)
} else {
in(i).d := DontCare
io_in(i).d := DontCare
in(i).d.ready := true.B
io_in(i).d.valid := false.B
}
if (connectEIO(i).exists(x=>x)) {
in(i).e.squeezeAll :<>= io_in(i).e.squeezeAll
} else {
in(i).e := DontCare
io_in(i).e := DontCare
in(i).e.valid := false.B
io_in(i).e.ready := true.B
}
}
// Transform output bundle sinks (sources use global namespace on both sides)
val out = Wire(Vec(io_out.size, TLBundle(wide_bundle)))
for (o <- 0 until out.size) {
val r = outputIdRanges(o)
if (connectAOI(o).exists(x=>x)) {
out(o).a.bits.user := DontCare
io_out(o).a.squeezeAll.waiveAll :<>= out(o).a.squeezeAll.waiveAll
} else {
out(o).a := DontCare
io_out(o).a := DontCare
out(o).a.ready := true.B
io_out(o).a.valid := false.B
}
if (connectBOI(o).exists(x=>x)) {
out(o).b.squeezeAll :<>= io_out(o).b.squeezeAll
} else {
out(o).b := DontCare
io_out(o).b := DontCare
out(o).b.valid := false.B
io_out(o).b.ready := true.B
}
if (connectCOI(o).exists(x=>x)) {
out(o).c.bits.user := DontCare
io_out(o).c.squeezeAll.waiveAll :<>= out(o).c.squeezeAll.waiveAll
} else {
out(o).c := DontCare
io_out(o).c := DontCare
out(o).c.ready := true.B
io_out(o).c.valid := false.B
}
if (connectDOI(o).exists(x=>x)) {
out(o).d.squeezeAll :<>= io_out(o).d.squeezeAll
out(o).d.bits.sink := io_out(o).d.bits.sink | r.start.U
} else {
out(o).d := DontCare
io_out(o).d := DontCare
out(o).d.valid := false.B
io_out(o).d.ready := true.B
}
if (connectEOI(o).exists(x=>x)) {
io_out(o).e.squeezeAll :<>= out(o).e.squeezeAll
io_out(o).e.bits.sink := trim(out(o).e.bits.sink, r.size)
} else {
out(o).e := DontCare
io_out(o).e := DontCare
out(o).e.ready := true.B
io_out(o).e.valid := false.B
}
}
// Filter a list to only those elements selected
def filter[T](data: Seq[T], mask: Seq[Boolean]) = (data zip mask).filter(_._2).map(_._1)
// Based on input=>output connectivity, create per-input minimal address decode circuits
val requiredAC = (connectAIO ++ connectCIO).distinct
val outputPortFns: Map[Vector[Boolean], Seq[UInt => Bool]] = requiredAC.map { connectO =>
val port_addrs = edgesOut.map(_.manager.managers.flatMap(_.address))
val routingMask = AddressDecoder(filter(port_addrs, connectO))
val route_addrs = port_addrs.map(seq => AddressSet.unify(seq.map(_.widen(~routingMask)).distinct))
// Print the address mapping
if (false) {
println("Xbar mapping:")
route_addrs.foreach { p =>
print(" ")
p.foreach { a => print(s" ${a}") }
println("")
}
println("--")
}
(connectO, route_addrs.map(seq => (addr: UInt) => seq.map(_.contains(addr)).reduce(_ || _)))
}.toMap
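    // For each distinct connectivity vector, AddressDecoder picks a minimal set of address
    // bits sufficient to tell the reachable outputs apart; widening each output's address
    // sets by the ignored bits (~routingMask) keeps the per-input comparators small.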
// Print the ID mapping
if (false) {
println(s"XBar mapping:")
(edgesIn zip inputIdRanges).zipWithIndex.foreach { case ((edge, id), i) =>
println(s"\t$i assigned ${id} for ${edge.client.clients.map(_.name).mkString(", ")}")
}
println("")
}
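    // Routing requests: A and C are steered by address through the decode functions above
    // (or unconditionally when an input can only reach one output); B and D are steered
    // back to the input whose source-ID range contains the message source; E is steered to
    // the output whose sink-ID range contains the sink. The beats* values are the number
    // of beats after the first in each message, used below to hold arbitration grants for
    // the full burst.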
val addressA = (in zip edgesIn) map { case (i, e) => e.address(i.a.bits) }
val addressC = (in zip edgesIn) map { case (i, e) => e.address(i.c.bits) }
def unique(x: Vector[Boolean]): Bool = (x.filter(x=>x).size <= 1).B
val requestAIO = (connectAIO zip addressA) map { case (c, i) => outputPortFns(c).map { o => unique(c) || o(i) } }
val requestCIO = (connectCIO zip addressC) map { case (c, i) => outputPortFns(c).map { o => unique(c) || o(i) } }
val requestBOI = out.map { o => inputIdRanges.map { i => i.contains(o.b.bits.source) } }
val requestDOI = out.map { o => inputIdRanges.map { i => i.contains(o.d.bits.source) } }
val requestEIO = in.map { i => outputIdRanges.map { o => o.contains(i.e.bits.sink) } }
val beatsAI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.a.bits) }
val beatsBO = (out zip edgesOut) map { case (o, e) => e.numBeats1(o.b.bits) }
val beatsCI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.c.bits) }
val beatsDO = (out zip edgesOut) map { case (o, e) => e.numBeats1(o.d.bits) }
val beatsEI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.e.bits) }
// Fanout the input sources to the output sinks
val portsAOI = transpose((in zip requestAIO) map { case (i, r) => TLXbar.fanout(i.a, r, edgesOut.map(_.params(ForceFanoutKey).a)) })
val portsBIO = transpose((out zip requestBOI) map { case (o, r) => TLXbar.fanout(o.b, r, edgesIn .map(_.params(ForceFanoutKey).b)) })
val portsCOI = transpose((in zip requestCIO) map { case (i, r) => TLXbar.fanout(i.c, r, edgesOut.map(_.params(ForceFanoutKey).c)) })
val portsDIO = transpose((out zip requestDOI) map { case (o, r) => TLXbar.fanout(o.d, r, edgesIn .map(_.params(ForceFanoutKey).d)) })
val portsEOI = transpose((in zip requestEIO) map { case (i, r) => TLXbar.fanout(i.e, r, edgesOut.map(_.params(ForceFanoutKey).e)) })
// Arbitrate amongst the sources
for (o <- 0 until out.size) {
TLArbiter(policy)(out(o).a, filter(beatsAI zip portsAOI(o), connectAOI(o)):_*)
TLArbiter(policy)(out(o).c, filter(beatsCI zip portsCOI(o), connectCOI(o)):_*)
TLArbiter(policy)(out(o).e, filter(beatsEI zip portsEOI(o), connectEOI(o)):_*)
filter(portsAOI(o), connectAOI(o).map(!_)) foreach { r => r.ready := false.B }
filter(portsCOI(o), connectCOI(o).map(!_)) foreach { r => r.ready := false.B }
filter(portsEOI(o), connectEOI(o).map(!_)) foreach { r => r.ready := false.B }
}
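    // Arbitrate the response direction the same way: each input port arbitrates amongst
    // the B and D traffic fanned out from the outputs that can reach it.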
for (i <- 0 until in.size) {
TLArbiter(policy)(in(i).b, filter(beatsBO zip portsBIO(i), connectBIO(i)):_*)
TLArbiter(policy)(in(i).d, filter(beatsDO zip portsDIO(i), connectDIO(i)):_*)
filter(portsBIO(i), connectBIO(i).map(!_)) foreach { r => r.ready := false.B }
filter(portsDIO(i), connectDIO(i).map(!_)) foreach { r => r.ready := false.B }
}
}
def apply(policy: TLArbiter.Policy = TLArbiter.roundRobin, nameSuffix: Option[String] = None)(implicit p: Parameters): TLNode =
{
val xbar = LazyModule(new TLXbar(policy, nameSuffix))
xbar.node
}
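  // Typical diplomatic usage (sketch; `clientA`, `clientB` and `manager` are placeholder
  // nodes, not defined in this file):
  //   val xbar = TLXbar()
  //   xbar := clientA.node
  //   xbar := clientB.node
  //   manager.node := xbar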
// Replicate an input port to each output port
def fanout[T <: TLChannel](input: DecoupledIO[T], select: Seq[Bool], force: Seq[Boolean] = Nil): Seq[DecoupledIO[T]] = {
val filtered = Wire(Vec(select.size, chiselTypeOf(input)))
for (i <- 0 until select.size) {
filtered(i).bits := (if (force.lift(i).getOrElse(false)) IdentityModule(input.bits) else input.bits)
filtered(i).valid := input.valid && (select(i) || (select.size == 1).B)
}
input.ready := Mux1H(select, filtered.map(_.ready))
filtered
}
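  // Ready back-propagation: the input is ready only when the single selected copy is ready
  // (Mux1H over the one-hot select); the `select.size == 1` case lets a trivial one-output
  // fanout pass traffic through without any decode.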
}
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class TLRAMXbar(nManagers: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val fuzz = LazyModule(new TLFuzzer(txns))
val model = LazyModule(new TLRAMModel("Xbar"))
val xbar = LazyModule(new TLXbar)
xbar.node := TLDelayer(0.1) := model.node := fuzz.node
(0 until nManagers) foreach { n =>
val ram = LazyModule(new TLRAM(AddressSet(0x0+0x400*n, 0x3ff)))
ram.node := TLFragmenter(4, 256) := TLDelayer(0.1) := xbar.node
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzz.module.io.finished
}
}
class TLRAMXbarTest(nManagers: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLRAMXbar(nManagers,txns)).module)
dut.io.start := io.start
io.finished := dut.io.finished
}
class TLMulticlientXbar(nManagers: Int, nClients: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val xbar = LazyModule(new TLXbar)
val fuzzers = (0 until nClients) map { n =>
val fuzz = LazyModule(new TLFuzzer(txns))
xbar.node := TLDelayer(0.1) := fuzz.node
fuzz
}
(0 until nManagers) foreach { n =>
val ram = LazyModule(new TLRAM(AddressSet(0x0+0x400*n, 0x3ff)))
ram.node := TLFragmenter(4, 256) := TLDelayer(0.1) := xbar.node
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzzers.last.module.io.finished
}
}
class TLMulticlientXbarTest(nManagers: Int, nClients: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLMulticlientXbar(nManagers, nClients, txns)).module)
dut.io.start := io.start
io.finished := dut.io.finished
}
| module TLXbar_MasterXbar_BoomTile_i2_o1_a32d64s3k3z4c( // @[Xbar.scala:74:9]
input clock, // @[Xbar.scala:74:9]
input reset, // @[Xbar.scala:74:9]
output auto_anon_in_1_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_1_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_in_1_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_in_1_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_anon_in_1_a_bits_size, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_1_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_anon_in_1_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_anon_in_1_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_anon_in_1_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_1_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_1_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_1_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_in_1_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_anon_in_1_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_anon_in_1_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_in_1_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_1_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_anon_in_1_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_1_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_0_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_0_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_in_0_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_in_0_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_anon_in_0_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_anon_in_0_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_anon_in_0_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_anon_in_0_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_anon_in_0_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_0_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_0_b_ready, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_0_b_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_in_0_b_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_anon_in_0_b_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_anon_in_0_b_bits_size, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_anon_in_0_b_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_anon_in_0_b_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_anon_in_0_b_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_anon_in_0_b_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_0_b_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_0_c_ready, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_0_c_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_in_0_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_in_0_c_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_anon_in_0_c_bits_size, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_anon_in_0_c_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_anon_in_0_c_bits_address, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_anon_in_0_c_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_0_c_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_0_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_0_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_in_0_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_anon_in_0_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_anon_in_0_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_anon_in_0_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_in_0_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_0_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_anon_in_0_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_0_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_0_e_ready, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_0_e_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_in_0_e_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_anon_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_anon_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_anon_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_anon_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_b_ready, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_b_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_out_b_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_anon_out_b_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_anon_out_b_bits_size, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_out_b_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_anon_out_b_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_anon_out_b_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_anon_out_b_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_b_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_c_ready, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_c_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_out_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_out_c_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_anon_out_c_bits_size, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_out_c_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_anon_out_c_bits_address, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_anon_out_c_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_c_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_anon_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_anon_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_anon_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_e_ready, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_e_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_out_e_bits_sink // @[LazyModuleImp.scala:107:25]
);
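  // Generated TileLink crossbar: two client-side input ports (anon_in_0 with the full
  // A/B/C/D/E channel set, anon_in_1 with A/D only) arbitrated onto one output port
  // (anon_out). The name suffix appears to encode the widened bundle parameters:
  // 32-bit address, 64-bit data, 3-bit source, 3-bit sink, 4-bit size, with the
  // TL-C (B/C/E) channels present.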
wire [2:0] out_0_e_bits_sink; // @[Xbar.scala:216:19]
wire [2:0] out_0_d_bits_sink; // @[Xbar.scala:216:19]
wire [2:0] in_1_a_bits_source; // @[Xbar.scala:159:18]
wire [2:0] in_0_c_bits_source; // @[Xbar.scala:159:18]
wire [2:0] in_0_a_bits_source; // @[Xbar.scala:159:18]
wire auto_anon_in_1_a_valid_0 = auto_anon_in_1_a_valid; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_in_1_a_bits_opcode_0 = auto_anon_in_1_a_bits_opcode; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_in_1_a_bits_param_0 = auto_anon_in_1_a_bits_param; // @[Xbar.scala:74:9]
wire [3:0] auto_anon_in_1_a_bits_size_0 = auto_anon_in_1_a_bits_size; // @[Xbar.scala:74:9]
wire auto_anon_in_1_a_bits_source_0 = auto_anon_in_1_a_bits_source; // @[Xbar.scala:74:9]
wire [31:0] auto_anon_in_1_a_bits_address_0 = auto_anon_in_1_a_bits_address; // @[Xbar.scala:74:9]
wire [7:0] auto_anon_in_1_a_bits_mask_0 = auto_anon_in_1_a_bits_mask; // @[Xbar.scala:74:9]
wire [63:0] auto_anon_in_1_a_bits_data_0 = auto_anon_in_1_a_bits_data; // @[Xbar.scala:74:9]
wire auto_anon_in_1_a_bits_corrupt_0 = auto_anon_in_1_a_bits_corrupt; // @[Xbar.scala:74:9]
wire auto_anon_in_1_d_ready_0 = auto_anon_in_1_d_ready; // @[Xbar.scala:74:9]
wire auto_anon_in_0_a_valid_0 = auto_anon_in_0_a_valid; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_in_0_a_bits_opcode_0 = auto_anon_in_0_a_bits_opcode; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_in_0_a_bits_param_0 = auto_anon_in_0_a_bits_param; // @[Xbar.scala:74:9]
wire [3:0] auto_anon_in_0_a_bits_size_0 = auto_anon_in_0_a_bits_size; // @[Xbar.scala:74:9]
wire [1:0] auto_anon_in_0_a_bits_source_0 = auto_anon_in_0_a_bits_source; // @[Xbar.scala:74:9]
wire [31:0] auto_anon_in_0_a_bits_address_0 = auto_anon_in_0_a_bits_address; // @[Xbar.scala:74:9]
wire [7:0] auto_anon_in_0_a_bits_mask_0 = auto_anon_in_0_a_bits_mask; // @[Xbar.scala:74:9]
wire [63:0] auto_anon_in_0_a_bits_data_0 = auto_anon_in_0_a_bits_data; // @[Xbar.scala:74:9]
wire auto_anon_in_0_a_bits_corrupt_0 = auto_anon_in_0_a_bits_corrupt; // @[Xbar.scala:74:9]
wire auto_anon_in_0_b_ready_0 = auto_anon_in_0_b_ready; // @[Xbar.scala:74:9]
wire auto_anon_in_0_c_valid_0 = auto_anon_in_0_c_valid; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_in_0_c_bits_opcode_0 = auto_anon_in_0_c_bits_opcode; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_in_0_c_bits_param_0 = auto_anon_in_0_c_bits_param; // @[Xbar.scala:74:9]
wire [3:0] auto_anon_in_0_c_bits_size_0 = auto_anon_in_0_c_bits_size; // @[Xbar.scala:74:9]
wire [1:0] auto_anon_in_0_c_bits_source_0 = auto_anon_in_0_c_bits_source; // @[Xbar.scala:74:9]
wire [31:0] auto_anon_in_0_c_bits_address_0 = auto_anon_in_0_c_bits_address; // @[Xbar.scala:74:9]
wire [63:0] auto_anon_in_0_c_bits_data_0 = auto_anon_in_0_c_bits_data; // @[Xbar.scala:74:9]
wire auto_anon_in_0_c_bits_corrupt_0 = auto_anon_in_0_c_bits_corrupt; // @[Xbar.scala:74:9]
wire auto_anon_in_0_d_ready_0 = auto_anon_in_0_d_ready; // @[Xbar.scala:74:9]
wire auto_anon_in_0_e_valid_0 = auto_anon_in_0_e_valid; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_in_0_e_bits_sink_0 = auto_anon_in_0_e_bits_sink; // @[Xbar.scala:74:9]
wire auto_anon_out_a_ready_0 = auto_anon_out_a_ready; // @[Xbar.scala:74:9]
wire auto_anon_out_b_valid_0 = auto_anon_out_b_valid; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_out_b_bits_opcode_0 = auto_anon_out_b_bits_opcode; // @[Xbar.scala:74:9]
wire [1:0] auto_anon_out_b_bits_param_0 = auto_anon_out_b_bits_param; // @[Xbar.scala:74:9]
wire [3:0] auto_anon_out_b_bits_size_0 = auto_anon_out_b_bits_size; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_out_b_bits_source_0 = auto_anon_out_b_bits_source; // @[Xbar.scala:74:9]
wire [31:0] auto_anon_out_b_bits_address_0 = auto_anon_out_b_bits_address; // @[Xbar.scala:74:9]
wire [7:0] auto_anon_out_b_bits_mask_0 = auto_anon_out_b_bits_mask; // @[Xbar.scala:74:9]
wire [63:0] auto_anon_out_b_bits_data_0 = auto_anon_out_b_bits_data; // @[Xbar.scala:74:9]
wire auto_anon_out_b_bits_corrupt_0 = auto_anon_out_b_bits_corrupt; // @[Xbar.scala:74:9]
wire auto_anon_out_c_ready_0 = auto_anon_out_c_ready; // @[Xbar.scala:74:9]
wire auto_anon_out_d_valid_0 = auto_anon_out_d_valid; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_out_d_bits_opcode_0 = auto_anon_out_d_bits_opcode; // @[Xbar.scala:74:9]
wire [1:0] auto_anon_out_d_bits_param_0 = auto_anon_out_d_bits_param; // @[Xbar.scala:74:9]
wire [3:0] auto_anon_out_d_bits_size_0 = auto_anon_out_d_bits_size; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_out_d_bits_source_0 = auto_anon_out_d_bits_source; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_out_d_bits_sink_0 = auto_anon_out_d_bits_sink; // @[Xbar.scala:74:9]
wire auto_anon_out_d_bits_denied_0 = auto_anon_out_d_bits_denied; // @[Xbar.scala:74:9]
wire [63:0] auto_anon_out_d_bits_data_0 = auto_anon_out_d_bits_data; // @[Xbar.scala:74:9]
wire auto_anon_out_d_bits_corrupt_0 = auto_anon_out_d_bits_corrupt; // @[Xbar.scala:74:9]
wire auto_anon_out_e_ready_0 = auto_anon_out_e_ready; // @[Xbar.scala:74:9]
wire _readys_T_2 = reset; // @[Arbiter.scala:22:12]
wire auto_anon_in_1_d_bits_source = 1'h0; // @[Xbar.scala:74:9]
wire anonIn_1_d_bits_source = 1'h0; // @[MixedNode.scala:551:17]
wire in_1_b_valid = 1'h0; // @[Xbar.scala:159:18]
wire in_1_b_bits_corrupt = 1'h0; // @[Xbar.scala:159:18]
wire in_1_c_ready = 1'h0; // @[Xbar.scala:159:18]
wire in_1_c_valid = 1'h0; // @[Xbar.scala:159:18]
wire in_1_c_bits_corrupt = 1'h0; // @[Xbar.scala:159:18]
wire in_1_e_ready = 1'h0; // @[Xbar.scala:159:18]
wire in_1_e_valid = 1'h0; // @[Xbar.scala:159:18]
wire _requestEIO_T = 1'h0; // @[Parameters.scala:54:10]
wire _requestEIO_T_5 = 1'h0; // @[Parameters.scala:54:10]
wire beatsCI_opdata_1 = 1'h0; // @[Edges.scala:102:36]
wire portsBIO_filtered_1_ready = 1'h0; // @[Xbar.scala:352:24]
wire _portsBIO_out_0_b_ready_T_1 = 1'h0; // @[Mux.scala:30:73]
wire portsCOI_filtered_1_0_ready = 1'h0; // @[Xbar.scala:352:24]
wire portsCOI_filtered_1_0_valid = 1'h0; // @[Xbar.scala:352:24]
wire portsCOI_filtered_1_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24]
wire _portsCOI_filtered_0_valid_T_3 = 1'h0; // @[Xbar.scala:355:40]
wire portsEOI_filtered_1_0_ready = 1'h0; // @[Xbar.scala:352:24]
wire portsEOI_filtered_1_0_valid = 1'h0; // @[Xbar.scala:352:24]
wire _portsEOI_filtered_0_valid_T_3 = 1'h0; // @[Xbar.scala:355:40]
wire _state_WIRE_0 = 1'h0; // @[Arbiter.scala:88:34]
wire _state_WIRE_1 = 1'h0; // @[Arbiter.scala:88:34]
wire in_1_b_ready = 1'h1; // @[Xbar.scala:159:18]
wire _requestAIO_T_4 = 1'h1; // @[Parameters.scala:137:59]
wire requestAIO_0_0 = 1'h1; // @[Xbar.scala:307:107]
wire _requestAIO_T_9 = 1'h1; // @[Parameters.scala:137:59]
wire requestAIO_1_0 = 1'h1; // @[Xbar.scala:307:107]
wire _requestCIO_T_4 = 1'h1; // @[Parameters.scala:137:59]
wire requestCIO_0_0 = 1'h1; // @[Xbar.scala:308:107]
wire _requestCIO_T_9 = 1'h1; // @[Parameters.scala:137:59]
wire requestCIO_1_0 = 1'h1; // @[Xbar.scala:308:107]
wire _requestBOI_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire _requestBOI_T_4 = 1'h1; // @[Parameters.scala:57:20]
wire _requestDOI_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire _requestDOI_T_4 = 1'h1; // @[Parameters.scala:57:20]
wire _requestEIO_T_1 = 1'h1; // @[Parameters.scala:54:32]
wire _requestEIO_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire _requestEIO_T_3 = 1'h1; // @[Parameters.scala:54:67]
wire _requestEIO_T_4 = 1'h1; // @[Parameters.scala:57:20]
wire requestEIO_0_0 = 1'h1; // @[Parameters.scala:56:48]
wire _requestEIO_T_6 = 1'h1; // @[Parameters.scala:54:32]
wire _requestEIO_T_7 = 1'h1; // @[Parameters.scala:56:32]
wire _requestEIO_T_8 = 1'h1; // @[Parameters.scala:54:67]
wire _requestEIO_T_9 = 1'h1; // @[Parameters.scala:57:20]
wire requestEIO_1_0 = 1'h1; // @[Parameters.scala:56:48]
wire _portsAOI_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54]
wire _portsAOI_filtered_0_valid_T_2 = 1'h1; // @[Xbar.scala:355:54]
wire _portsCOI_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54]
wire _portsCOI_filtered_0_valid_T_2 = 1'h1; // @[Xbar.scala:355:54]
wire _portsEOI_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54]
wire _portsEOI_filtered_0_valid_T_2 = 1'h1; // @[Xbar.scala:355:54]
wire [2:0] in_1_b_bits_opcode = 3'h0; // @[Xbar.scala:159:18]
wire [2:0] in_1_b_bits_source = 3'h0; // @[Xbar.scala:159:18]
wire [2:0] in_1_c_bits_opcode = 3'h0; // @[Xbar.scala:159:18]
wire [2:0] in_1_c_bits_param = 3'h0; // @[Xbar.scala:159:18]
wire [2:0] in_1_c_bits_source = 3'h0; // @[Xbar.scala:159:18]
wire [2:0] in_1_e_bits_sink = 3'h0; // @[Xbar.scala:159:18]
wire [2:0] _requestEIO_uncommonBits_T_1 = 3'h0; // @[Parameters.scala:52:29]
wire [2:0] requestEIO_uncommonBits_1 = 3'h0; // @[Parameters.scala:52:56]
wire [2:0] portsCOI_filtered_1_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24]
wire [2:0] portsCOI_filtered_1_0_bits_param = 3'h0; // @[Xbar.scala:352:24]
wire [2:0] portsCOI_filtered_1_0_bits_source = 3'h0; // @[Xbar.scala:352:24]
wire [2:0] portsEOI_filtered_1_0_bits_sink = 3'h0; // @[Xbar.scala:352:24]
wire [63:0] in_1_b_bits_data = 64'h0; // @[Xbar.scala:159:18]
wire [63:0] in_1_c_bits_data = 64'h0; // @[Xbar.scala:159:18]
wire [63:0] portsCOI_filtered_1_0_bits_data = 64'h0; // @[Xbar.scala:352:24]
wire [31:0] in_1_b_bits_address = 32'h0; // @[Xbar.scala:159:18]
wire [31:0] in_1_c_bits_address = 32'h0; // @[Xbar.scala:159:18]
wire [31:0] _requestCIO_T_5 = 32'h0; // @[Parameters.scala:137:31]
wire [31:0] portsCOI_filtered_1_0_bits_address = 32'h0; // @[Xbar.scala:352:24]
wire [3:0] in_1_b_bits_size = 4'h0; // @[Xbar.scala:159:18]
wire [3:0] in_1_c_bits_size = 4'h0; // @[Xbar.scala:159:18]
wire [3:0] portsCOI_filtered_1_0_bits_size = 4'h0; // @[Xbar.scala:352:24]
wire [8:0] beatsBO_0 = 9'h0; // @[Edges.scala:221:14]
wire [8:0] beatsCI_decode_1 = 9'h0; // @[Edges.scala:220:59]
wire [8:0] beatsCI_1 = 9'h0; // @[Edges.scala:221:14]
wire [11:0] _beatsCI_decode_T_5 = 12'h0; // @[package.scala:243:46]
wire [11:0] _beatsCI_decode_T_4 = 12'hFFF; // @[package.scala:243:76]
wire [26:0] _beatsCI_decode_T_3 = 27'hFFF; // @[package.scala:243:71]
wire [32:0] _requestAIO_T_2 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] _requestAIO_T_3 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] _requestAIO_T_7 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] _requestAIO_T_8 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] _requestCIO_T_2 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] _requestCIO_T_3 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] _requestCIO_T_6 = 33'h0; // @[Parameters.scala:137:41]
wire [32:0] _requestCIO_T_7 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] _requestCIO_T_8 = 33'h0; // @[Parameters.scala:137:46]
wire [7:0] in_1_b_bits_mask = 8'h0; // @[Xbar.scala:159:18]
wire [1:0] in_1_b_bits_param = 2'h0; // @[Xbar.scala:159:18]
wire anonIn_1_a_ready; // @[MixedNode.scala:551:17]
wire anonIn_1_a_valid = auto_anon_in_1_a_valid_0; // @[Xbar.scala:74:9]
wire [2:0] anonIn_1_a_bits_opcode = auto_anon_in_1_a_bits_opcode_0; // @[Xbar.scala:74:9]
wire [2:0] anonIn_1_a_bits_param = auto_anon_in_1_a_bits_param_0; // @[Xbar.scala:74:9]
wire [3:0] anonIn_1_a_bits_size = auto_anon_in_1_a_bits_size_0; // @[Xbar.scala:74:9]
wire anonIn_1_a_bits_source = auto_anon_in_1_a_bits_source_0; // @[Xbar.scala:74:9]
wire [31:0] anonIn_1_a_bits_address = auto_anon_in_1_a_bits_address_0; // @[Xbar.scala:74:9]
wire [7:0] anonIn_1_a_bits_mask = auto_anon_in_1_a_bits_mask_0; // @[Xbar.scala:74:9]
wire [63:0] anonIn_1_a_bits_data = auto_anon_in_1_a_bits_data_0; // @[Xbar.scala:74:9]
wire anonIn_1_a_bits_corrupt = auto_anon_in_1_a_bits_corrupt_0; // @[Xbar.scala:74:9]
wire anonIn_1_d_ready = auto_anon_in_1_d_ready_0; // @[Xbar.scala:74:9]
wire anonIn_1_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] anonIn_1_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] anonIn_1_d_bits_param; // @[MixedNode.scala:551:17]
wire [3:0] anonIn_1_d_bits_size; // @[MixedNode.scala:551:17]
wire [2:0] anonIn_1_d_bits_sink; // @[MixedNode.scala:551:17]
wire anonIn_1_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] anonIn_1_d_bits_data; // @[MixedNode.scala:551:17]
wire anonIn_1_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire anonIn_a_ready; // @[MixedNode.scala:551:17]
wire anonIn_a_valid = auto_anon_in_0_a_valid_0; // @[Xbar.scala:74:9]
wire [2:0] anonIn_a_bits_opcode = auto_anon_in_0_a_bits_opcode_0; // @[Xbar.scala:74:9]
wire [2:0] anonIn_a_bits_param = auto_anon_in_0_a_bits_param_0; // @[Xbar.scala:74:9]
wire [3:0] anonIn_a_bits_size = auto_anon_in_0_a_bits_size_0; // @[Xbar.scala:74:9]
wire [1:0] anonIn_a_bits_source = auto_anon_in_0_a_bits_source_0; // @[Xbar.scala:74:9]
wire [31:0] anonIn_a_bits_address = auto_anon_in_0_a_bits_address_0; // @[Xbar.scala:74:9]
wire [7:0] anonIn_a_bits_mask = auto_anon_in_0_a_bits_mask_0; // @[Xbar.scala:74:9]
wire [63:0] anonIn_a_bits_data = auto_anon_in_0_a_bits_data_0; // @[Xbar.scala:74:9]
wire anonIn_a_bits_corrupt = auto_anon_in_0_a_bits_corrupt_0; // @[Xbar.scala:74:9]
wire anonIn_b_ready = auto_anon_in_0_b_ready_0; // @[Xbar.scala:74:9]
wire anonIn_b_valid; // @[MixedNode.scala:551:17]
wire [2:0] anonIn_b_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] anonIn_b_bits_param; // @[MixedNode.scala:551:17]
wire [3:0] anonIn_b_bits_size; // @[MixedNode.scala:551:17]
wire [1:0] anonIn_b_bits_source; // @[MixedNode.scala:551:17]
wire [31:0] anonIn_b_bits_address; // @[MixedNode.scala:551:17]
wire [7:0] anonIn_b_bits_mask; // @[MixedNode.scala:551:17]
wire [63:0] anonIn_b_bits_data; // @[MixedNode.scala:551:17]
wire anonIn_b_bits_corrupt; // @[MixedNode.scala:551:17]
wire anonIn_c_ready; // @[MixedNode.scala:551:17]
wire anonIn_c_valid = auto_anon_in_0_c_valid_0; // @[Xbar.scala:74:9]
wire [2:0] anonIn_c_bits_opcode = auto_anon_in_0_c_bits_opcode_0; // @[Xbar.scala:74:9]
wire [2:0] anonIn_c_bits_param = auto_anon_in_0_c_bits_param_0; // @[Xbar.scala:74:9]
wire [3:0] anonIn_c_bits_size = auto_anon_in_0_c_bits_size_0; // @[Xbar.scala:74:9]
wire [1:0] anonIn_c_bits_source = auto_anon_in_0_c_bits_source_0; // @[Xbar.scala:74:9]
wire [31:0] anonIn_c_bits_address = auto_anon_in_0_c_bits_address_0; // @[Xbar.scala:74:9]
wire [63:0] anonIn_c_bits_data = auto_anon_in_0_c_bits_data_0; // @[Xbar.scala:74:9]
wire anonIn_c_bits_corrupt = auto_anon_in_0_c_bits_corrupt_0; // @[Xbar.scala:74:9]
wire anonIn_d_ready = auto_anon_in_0_d_ready_0; // @[Xbar.scala:74:9]
wire anonIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] anonIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] anonIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [3:0] anonIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [1:0] anonIn_d_bits_source; // @[MixedNode.scala:551:17]
wire [2:0] anonIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire anonIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] anonIn_d_bits_data; // @[MixedNode.scala:551:17]
wire anonIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire anonIn_e_ready; // @[MixedNode.scala:551:17]
wire anonIn_e_valid = auto_anon_in_0_e_valid_0; // @[Xbar.scala:74:9]
wire [2:0] anonIn_e_bits_sink = auto_anon_in_0_e_bits_sink_0; // @[Xbar.scala:74:9]
wire anonOut_a_ready = auto_anon_out_a_ready_0; // @[Xbar.scala:74:9]
wire anonOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] anonOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] anonOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] anonOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [2:0] anonOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] anonOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] anonOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] anonOut_a_bits_data; // @[MixedNode.scala:542:17]
wire anonOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire anonOut_b_ready; // @[MixedNode.scala:542:17]
wire anonOut_b_valid = auto_anon_out_b_valid_0; // @[Xbar.scala:74:9]
wire [2:0] anonOut_b_bits_opcode = auto_anon_out_b_bits_opcode_0; // @[Xbar.scala:74:9]
wire [1:0] anonOut_b_bits_param = auto_anon_out_b_bits_param_0; // @[Xbar.scala:74:9]
wire [3:0] anonOut_b_bits_size = auto_anon_out_b_bits_size_0; // @[Xbar.scala:74:9]
wire [2:0] anonOut_b_bits_source = auto_anon_out_b_bits_source_0; // @[Xbar.scala:74:9]
wire [31:0] anonOut_b_bits_address = auto_anon_out_b_bits_address_0; // @[Xbar.scala:74:9]
wire [7:0] anonOut_b_bits_mask = auto_anon_out_b_bits_mask_0; // @[Xbar.scala:74:9]
wire [63:0] anonOut_b_bits_data = auto_anon_out_b_bits_data_0; // @[Xbar.scala:74:9]
wire anonOut_b_bits_corrupt = auto_anon_out_b_bits_corrupt_0; // @[Xbar.scala:74:9]
wire anonOut_c_ready = auto_anon_out_c_ready_0; // @[Xbar.scala:74:9]
wire anonOut_c_valid; // @[MixedNode.scala:542:17]
wire [2:0] anonOut_c_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] anonOut_c_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] anonOut_c_bits_size; // @[MixedNode.scala:542:17]
wire [2:0] anonOut_c_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] anonOut_c_bits_address; // @[MixedNode.scala:542:17]
wire [63:0] anonOut_c_bits_data; // @[MixedNode.scala:542:17]
wire anonOut_c_bits_corrupt; // @[MixedNode.scala:542:17]
wire anonOut_d_ready; // @[MixedNode.scala:542:17]
wire anonOut_d_valid = auto_anon_out_d_valid_0; // @[Xbar.scala:74:9]
wire [2:0] anonOut_d_bits_opcode = auto_anon_out_d_bits_opcode_0; // @[Xbar.scala:74:9]
wire [1:0] anonOut_d_bits_param = auto_anon_out_d_bits_param_0; // @[Xbar.scala:74:9]
wire [3:0] anonOut_d_bits_size = auto_anon_out_d_bits_size_0; // @[Xbar.scala:74:9]
wire [2:0] anonOut_d_bits_source = auto_anon_out_d_bits_source_0; // @[Xbar.scala:74:9]
wire [2:0] anonOut_d_bits_sink = auto_anon_out_d_bits_sink_0; // @[Xbar.scala:74:9]
wire anonOut_d_bits_denied = auto_anon_out_d_bits_denied_0; // @[Xbar.scala:74:9]
wire [63:0] anonOut_d_bits_data = auto_anon_out_d_bits_data_0; // @[Xbar.scala:74:9]
wire anonOut_d_bits_corrupt = auto_anon_out_d_bits_corrupt_0; // @[Xbar.scala:74:9]
wire anonOut_e_ready = auto_anon_out_e_ready_0; // @[Xbar.scala:74:9]
wire anonOut_e_valid; // @[MixedNode.scala:542:17]
wire [2:0] anonOut_e_bits_sink; // @[MixedNode.scala:542:17]
wire auto_anon_in_1_a_ready_0; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_in_1_d_bits_opcode_0; // @[Xbar.scala:74:9]
wire [1:0] auto_anon_in_1_d_bits_param_0; // @[Xbar.scala:74:9]
wire [3:0] auto_anon_in_1_d_bits_size_0; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_in_1_d_bits_sink_0; // @[Xbar.scala:74:9]
wire auto_anon_in_1_d_bits_denied_0; // @[Xbar.scala:74:9]
wire [63:0] auto_anon_in_1_d_bits_data_0; // @[Xbar.scala:74:9]
wire auto_anon_in_1_d_bits_corrupt_0; // @[Xbar.scala:74:9]
wire auto_anon_in_1_d_valid_0; // @[Xbar.scala:74:9]
wire auto_anon_in_0_a_ready_0; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_in_0_b_bits_opcode_0; // @[Xbar.scala:74:9]
wire [1:0] auto_anon_in_0_b_bits_param_0; // @[Xbar.scala:74:9]
wire [3:0] auto_anon_in_0_b_bits_size_0; // @[Xbar.scala:74:9]
wire [1:0] auto_anon_in_0_b_bits_source_0; // @[Xbar.scala:74:9]
wire [31:0] auto_anon_in_0_b_bits_address_0; // @[Xbar.scala:74:9]
wire [7:0] auto_anon_in_0_b_bits_mask_0; // @[Xbar.scala:74:9]
wire [63:0] auto_anon_in_0_b_bits_data_0; // @[Xbar.scala:74:9]
wire auto_anon_in_0_b_bits_corrupt_0; // @[Xbar.scala:74:9]
wire auto_anon_in_0_b_valid_0; // @[Xbar.scala:74:9]
wire auto_anon_in_0_c_ready_0; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_in_0_d_bits_opcode_0; // @[Xbar.scala:74:9]
wire [1:0] auto_anon_in_0_d_bits_param_0; // @[Xbar.scala:74:9]
wire [3:0] auto_anon_in_0_d_bits_size_0; // @[Xbar.scala:74:9]
wire [1:0] auto_anon_in_0_d_bits_source_0; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_in_0_d_bits_sink_0; // @[Xbar.scala:74:9]
wire auto_anon_in_0_d_bits_denied_0; // @[Xbar.scala:74:9]
wire [63:0] auto_anon_in_0_d_bits_data_0; // @[Xbar.scala:74:9]
wire auto_anon_in_0_d_bits_corrupt_0; // @[Xbar.scala:74:9]
wire auto_anon_in_0_d_valid_0; // @[Xbar.scala:74:9]
wire auto_anon_in_0_e_ready_0; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_out_a_bits_opcode_0; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_out_a_bits_param_0; // @[Xbar.scala:74:9]
wire [3:0] auto_anon_out_a_bits_size_0; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_out_a_bits_source_0; // @[Xbar.scala:74:9]
wire [31:0] auto_anon_out_a_bits_address_0; // @[Xbar.scala:74:9]
wire [7:0] auto_anon_out_a_bits_mask_0; // @[Xbar.scala:74:9]
wire [63:0] auto_anon_out_a_bits_data_0; // @[Xbar.scala:74:9]
wire auto_anon_out_a_bits_corrupt_0; // @[Xbar.scala:74:9]
wire auto_anon_out_a_valid_0; // @[Xbar.scala:74:9]
wire auto_anon_out_b_ready_0; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_out_c_bits_opcode_0; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_out_c_bits_param_0; // @[Xbar.scala:74:9]
wire [3:0] auto_anon_out_c_bits_size_0; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_out_c_bits_source_0; // @[Xbar.scala:74:9]
wire [31:0] auto_anon_out_c_bits_address_0; // @[Xbar.scala:74:9]
wire [63:0] auto_anon_out_c_bits_data_0; // @[Xbar.scala:74:9]
wire auto_anon_out_c_bits_corrupt_0; // @[Xbar.scala:74:9]
wire auto_anon_out_c_valid_0; // @[Xbar.scala:74:9]
wire auto_anon_out_d_ready_0; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_out_e_bits_sink_0; // @[Xbar.scala:74:9]
wire auto_anon_out_e_valid_0; // @[Xbar.scala:74:9]
wire in_0_a_ready; // @[Xbar.scala:159:18]
assign auto_anon_in_0_a_ready_0 = anonIn_a_ready; // @[Xbar.scala:74:9]
wire in_0_a_valid = anonIn_a_valid; // @[Xbar.scala:159:18]
wire [2:0] in_0_a_bits_opcode = anonIn_a_bits_opcode; // @[Xbar.scala:159:18]
wire [2:0] in_0_a_bits_param = anonIn_a_bits_param; // @[Xbar.scala:159:18]
wire [3:0] in_0_a_bits_size = anonIn_a_bits_size; // @[Xbar.scala:159:18]
wire [1:0] _in_0_a_bits_source_T = anonIn_a_bits_source; // @[Xbar.scala:166:55]
wire [31:0] in_0_a_bits_address = anonIn_a_bits_address; // @[Xbar.scala:159:18]
wire [7:0] in_0_a_bits_mask = anonIn_a_bits_mask; // @[Xbar.scala:159:18]
wire [63:0] in_0_a_bits_data = anonIn_a_bits_data; // @[Xbar.scala:159:18]
wire in_0_a_bits_corrupt = anonIn_a_bits_corrupt; // @[Xbar.scala:159:18]
wire in_0_b_ready = anonIn_b_ready; // @[Xbar.scala:159:18]
wire in_0_b_valid; // @[Xbar.scala:159:18]
assign auto_anon_in_0_b_valid_0 = anonIn_b_valid; // @[Xbar.scala:74:9]
wire [2:0] in_0_b_bits_opcode; // @[Xbar.scala:159:18]
assign auto_anon_in_0_b_bits_opcode_0 = anonIn_b_bits_opcode; // @[Xbar.scala:74:9]
wire [1:0] in_0_b_bits_param; // @[Xbar.scala:159:18]
assign auto_anon_in_0_b_bits_param_0 = anonIn_b_bits_param; // @[Xbar.scala:74:9]
wire [3:0] in_0_b_bits_size; // @[Xbar.scala:159:18]
assign auto_anon_in_0_b_bits_size_0 = anonIn_b_bits_size; // @[Xbar.scala:74:9]
wire [1:0] _anonIn_b_bits_source_T; // @[Xbar.scala:156:69]
assign auto_anon_in_0_b_bits_source_0 = anonIn_b_bits_source; // @[Xbar.scala:74:9]
wire [31:0] in_0_b_bits_address; // @[Xbar.scala:159:18]
assign auto_anon_in_0_b_bits_address_0 = anonIn_b_bits_address; // @[Xbar.scala:74:9]
wire [7:0] in_0_b_bits_mask; // @[Xbar.scala:159:18]
assign auto_anon_in_0_b_bits_mask_0 = anonIn_b_bits_mask; // @[Xbar.scala:74:9]
wire [63:0] in_0_b_bits_data; // @[Xbar.scala:159:18]
assign auto_anon_in_0_b_bits_data_0 = anonIn_b_bits_data; // @[Xbar.scala:74:9]
wire in_0_b_bits_corrupt; // @[Xbar.scala:159:18]
assign auto_anon_in_0_b_bits_corrupt_0 = anonIn_b_bits_corrupt; // @[Xbar.scala:74:9]
wire in_0_c_ready; // @[Xbar.scala:159:18]
assign auto_anon_in_0_c_ready_0 = anonIn_c_ready; // @[Xbar.scala:74:9]
wire in_0_c_valid = anonIn_c_valid; // @[Xbar.scala:159:18]
wire [2:0] in_0_c_bits_opcode = anonIn_c_bits_opcode; // @[Xbar.scala:159:18]
wire [2:0] in_0_c_bits_param = anonIn_c_bits_param; // @[Xbar.scala:159:18]
wire [3:0] in_0_c_bits_size = anonIn_c_bits_size; // @[Xbar.scala:159:18]
wire [1:0] _in_0_c_bits_source_T = anonIn_c_bits_source; // @[Xbar.scala:187:55]
wire [31:0] in_0_c_bits_address = anonIn_c_bits_address; // @[Xbar.scala:159:18]
wire [63:0] in_0_c_bits_data = anonIn_c_bits_data; // @[Xbar.scala:159:18]
wire in_0_c_bits_corrupt = anonIn_c_bits_corrupt; // @[Xbar.scala:159:18]
wire in_0_d_ready = anonIn_d_ready; // @[Xbar.scala:159:18]
wire in_0_d_valid; // @[Xbar.scala:159:18]
assign auto_anon_in_0_d_valid_0 = anonIn_d_valid; // @[Xbar.scala:74:9]
wire [2:0] in_0_d_bits_opcode; // @[Xbar.scala:159:18]
assign auto_anon_in_0_d_bits_opcode_0 = anonIn_d_bits_opcode; // @[Xbar.scala:74:9]
wire [1:0] in_0_d_bits_param; // @[Xbar.scala:159:18]
assign auto_anon_in_0_d_bits_param_0 = anonIn_d_bits_param; // @[Xbar.scala:74:9]
wire [3:0] in_0_d_bits_size; // @[Xbar.scala:159:18]
assign auto_anon_in_0_d_bits_size_0 = anonIn_d_bits_size; // @[Xbar.scala:74:9]
wire [1:0] _anonIn_d_bits_source_T; // @[Xbar.scala:156:69]
assign auto_anon_in_0_d_bits_source_0 = anonIn_d_bits_source; // @[Xbar.scala:74:9]
wire [2:0] in_0_d_bits_sink; // @[Xbar.scala:159:18]
assign auto_anon_in_0_d_bits_sink_0 = anonIn_d_bits_sink; // @[Xbar.scala:74:9]
wire in_0_d_bits_denied; // @[Xbar.scala:159:18]
assign auto_anon_in_0_d_bits_denied_0 = anonIn_d_bits_denied; // @[Xbar.scala:74:9]
wire [63:0] in_0_d_bits_data; // @[Xbar.scala:159:18]
assign auto_anon_in_0_d_bits_data_0 = anonIn_d_bits_data; // @[Xbar.scala:74:9]
wire in_0_d_bits_corrupt; // @[Xbar.scala:159:18]
assign auto_anon_in_0_d_bits_corrupt_0 = anonIn_d_bits_corrupt; // @[Xbar.scala:74:9]
wire in_0_e_ready; // @[Xbar.scala:159:18]
assign auto_anon_in_0_e_ready_0 = anonIn_e_ready; // @[Xbar.scala:74:9]
wire in_0_e_valid = anonIn_e_valid; // @[Xbar.scala:159:18]
wire [2:0] in_0_e_bits_sink = anonIn_e_bits_sink; // @[Xbar.scala:159:18]
wire in_1_a_ready; // @[Xbar.scala:159:18]
assign auto_anon_in_1_a_ready_0 = anonIn_1_a_ready; // @[Xbar.scala:74:9]
wire in_1_a_valid = anonIn_1_a_valid; // @[Xbar.scala:159:18]
wire [2:0] in_1_a_bits_opcode = anonIn_1_a_bits_opcode; // @[Xbar.scala:159:18]
wire [2:0] in_1_a_bits_param = anonIn_1_a_bits_param; // @[Xbar.scala:159:18]
wire [3:0] in_1_a_bits_size = anonIn_1_a_bits_size; // @[Xbar.scala:159:18]
wire [31:0] in_1_a_bits_address = anonIn_1_a_bits_address; // @[Xbar.scala:159:18]
wire [7:0] in_1_a_bits_mask = anonIn_1_a_bits_mask; // @[Xbar.scala:159:18]
wire [63:0] in_1_a_bits_data = anonIn_1_a_bits_data; // @[Xbar.scala:159:18]
wire in_1_a_bits_corrupt = anonIn_1_a_bits_corrupt; // @[Xbar.scala:159:18]
wire in_1_d_ready = anonIn_1_d_ready; // @[Xbar.scala:159:18]
wire in_1_d_valid; // @[Xbar.scala:159:18]
assign auto_anon_in_1_d_valid_0 = anonIn_1_d_valid; // @[Xbar.scala:74:9]
wire [2:0] in_1_d_bits_opcode; // @[Xbar.scala:159:18]
assign auto_anon_in_1_d_bits_opcode_0 = anonIn_1_d_bits_opcode; // @[Xbar.scala:74:9]
wire [1:0] in_1_d_bits_param; // @[Xbar.scala:159:18]
assign auto_anon_in_1_d_bits_param_0 = anonIn_1_d_bits_param; // @[Xbar.scala:74:9]
wire [3:0] in_1_d_bits_size; // @[Xbar.scala:159:18]
assign auto_anon_in_1_d_bits_size_0 = anonIn_1_d_bits_size; // @[Xbar.scala:74:9]
wire [2:0] in_1_d_bits_sink; // @[Xbar.scala:159:18]
assign auto_anon_in_1_d_bits_sink_0 = anonIn_1_d_bits_sink; // @[Xbar.scala:74:9]
wire in_1_d_bits_denied; // @[Xbar.scala:159:18]
assign auto_anon_in_1_d_bits_denied_0 = anonIn_1_d_bits_denied; // @[Xbar.scala:74:9]
wire [63:0] in_1_d_bits_data; // @[Xbar.scala:159:18]
assign auto_anon_in_1_d_bits_data_0 = anonIn_1_d_bits_data; // @[Xbar.scala:74:9]
wire in_1_d_bits_corrupt; // @[Xbar.scala:159:18]
assign auto_anon_in_1_d_bits_corrupt_0 = anonIn_1_d_bits_corrupt; // @[Xbar.scala:74:9]
wire out_0_a_ready = anonOut_a_ready; // @[Xbar.scala:216:19]
wire out_0_a_valid; // @[Xbar.scala:216:19]
assign auto_anon_out_a_valid_0 = anonOut_a_valid; // @[Xbar.scala:74:9]
wire [2:0] out_0_a_bits_opcode; // @[Xbar.scala:216:19]
assign auto_anon_out_a_bits_opcode_0 = anonOut_a_bits_opcode; // @[Xbar.scala:74:9]
wire [2:0] out_0_a_bits_param; // @[Xbar.scala:216:19]
assign auto_anon_out_a_bits_param_0 = anonOut_a_bits_param; // @[Xbar.scala:74:9]
wire [3:0] out_0_a_bits_size; // @[Xbar.scala:216:19]
assign auto_anon_out_a_bits_size_0 = anonOut_a_bits_size; // @[Xbar.scala:74:9]
wire [2:0] out_0_a_bits_source; // @[Xbar.scala:216:19]
assign auto_anon_out_a_bits_source_0 = anonOut_a_bits_source; // @[Xbar.scala:74:9]
wire [31:0] out_0_a_bits_address; // @[Xbar.scala:216:19]
assign auto_anon_out_a_bits_address_0 = anonOut_a_bits_address; // @[Xbar.scala:74:9]
wire [7:0] out_0_a_bits_mask; // @[Xbar.scala:216:19]
assign auto_anon_out_a_bits_mask_0 = anonOut_a_bits_mask; // @[Xbar.scala:74:9]
wire [63:0] out_0_a_bits_data; // @[Xbar.scala:216:19]
assign auto_anon_out_a_bits_data_0 = anonOut_a_bits_data; // @[Xbar.scala:74:9]
wire out_0_a_bits_corrupt; // @[Xbar.scala:216:19]
assign auto_anon_out_a_bits_corrupt_0 = anonOut_a_bits_corrupt; // @[Xbar.scala:74:9]
wire out_0_b_ready; // @[Xbar.scala:216:19]
assign auto_anon_out_b_ready_0 = anonOut_b_ready; // @[Xbar.scala:74:9]
wire out_0_b_valid = anonOut_b_valid; // @[Xbar.scala:216:19]
wire [2:0] out_0_b_bits_opcode = anonOut_b_bits_opcode; // @[Xbar.scala:216:19]
wire [1:0] out_0_b_bits_param = anonOut_b_bits_param; // @[Xbar.scala:216:19]
wire [3:0] out_0_b_bits_size = anonOut_b_bits_size; // @[Xbar.scala:216:19]
wire [2:0] out_0_b_bits_source = anonOut_b_bits_source; // @[Xbar.scala:216:19]
wire [31:0] out_0_b_bits_address = anonOut_b_bits_address; // @[Xbar.scala:216:19]
wire [7:0] out_0_b_bits_mask = anonOut_b_bits_mask; // @[Xbar.scala:216:19]
wire [63:0] out_0_b_bits_data = anonOut_b_bits_data; // @[Xbar.scala:216:19]
wire out_0_b_bits_corrupt = anonOut_b_bits_corrupt; // @[Xbar.scala:216:19]
wire out_0_c_ready = anonOut_c_ready; // @[Xbar.scala:216:19]
wire out_0_c_valid; // @[Xbar.scala:216:19]
assign auto_anon_out_c_valid_0 = anonOut_c_valid; // @[Xbar.scala:74:9]
wire [2:0] out_0_c_bits_opcode; // @[Xbar.scala:216:19]
assign auto_anon_out_c_bits_opcode_0 = anonOut_c_bits_opcode; // @[Xbar.scala:74:9]
wire [2:0] out_0_c_bits_param; // @[Xbar.scala:216:19]
assign auto_anon_out_c_bits_param_0 = anonOut_c_bits_param; // @[Xbar.scala:74:9]
wire [3:0] out_0_c_bits_size; // @[Xbar.scala:216:19]
assign auto_anon_out_c_bits_size_0 = anonOut_c_bits_size; // @[Xbar.scala:74:9]
wire [2:0] out_0_c_bits_source; // @[Xbar.scala:216:19]
assign auto_anon_out_c_bits_source_0 = anonOut_c_bits_source; // @[Xbar.scala:74:9]
wire [31:0] out_0_c_bits_address; // @[Xbar.scala:216:19]
assign auto_anon_out_c_bits_address_0 = anonOut_c_bits_address; // @[Xbar.scala:74:9]
wire [63:0] out_0_c_bits_data; // @[Xbar.scala:216:19]
assign auto_anon_out_c_bits_data_0 = anonOut_c_bits_data; // @[Xbar.scala:74:9]
wire out_0_c_bits_corrupt; // @[Xbar.scala:216:19]
assign auto_anon_out_c_bits_corrupt_0 = anonOut_c_bits_corrupt; // @[Xbar.scala:74:9]
wire out_0_d_ready; // @[Xbar.scala:216:19]
assign auto_anon_out_d_ready_0 = anonOut_d_ready; // @[Xbar.scala:74:9]
wire out_0_d_valid = anonOut_d_valid; // @[Xbar.scala:216:19]
wire [2:0] out_0_d_bits_opcode = anonOut_d_bits_opcode; // @[Xbar.scala:216:19]
wire [1:0] out_0_d_bits_param = anonOut_d_bits_param; // @[Xbar.scala:216:19]
wire [3:0] out_0_d_bits_size = anonOut_d_bits_size; // @[Xbar.scala:216:19]
wire [2:0] out_0_d_bits_source = anonOut_d_bits_source; // @[Xbar.scala:216:19]
wire [2:0] _out_0_d_bits_sink_T = anonOut_d_bits_sink; // @[Xbar.scala:251:53]
wire out_0_d_bits_denied = anonOut_d_bits_denied; // @[Xbar.scala:216:19]
wire [63:0] out_0_d_bits_data = anonOut_d_bits_data; // @[Xbar.scala:216:19]
wire out_0_d_bits_corrupt = anonOut_d_bits_corrupt; // @[Xbar.scala:216:19]
wire out_0_e_ready = anonOut_e_ready; // @[Xbar.scala:216:19]
wire out_0_e_valid; // @[Xbar.scala:216:19]
assign auto_anon_out_e_valid_0 = anonOut_e_valid; // @[Xbar.scala:74:9]
wire [2:0] _anonOut_e_bits_sink_T; // @[Xbar.scala:156:69]
assign auto_anon_out_e_bits_sink_0 = anonOut_e_bits_sink; // @[Xbar.scala:74:9]
wire portsAOI_filtered_0_ready; // @[Xbar.scala:352:24]
assign anonIn_a_ready = in_0_a_ready; // @[Xbar.scala:159:18]
wire _portsAOI_filtered_0_valid_T_1 = in_0_a_valid; // @[Xbar.scala:159:18, :355:40]
wire [2:0] portsAOI_filtered_0_bits_opcode = in_0_a_bits_opcode; // @[Xbar.scala:159:18, :352:24]
wire [2:0] portsAOI_filtered_0_bits_param = in_0_a_bits_param; // @[Xbar.scala:159:18, :352:24]
wire [3:0] portsAOI_filtered_0_bits_size = in_0_a_bits_size; // @[Xbar.scala:159:18, :352:24]
wire [2:0] portsAOI_filtered_0_bits_source = in_0_a_bits_source; // @[Xbar.scala:159:18, :352:24]
wire [31:0] _requestAIO_T = in_0_a_bits_address; // @[Xbar.scala:159:18]
wire [31:0] portsAOI_filtered_0_bits_address = in_0_a_bits_address; // @[Xbar.scala:159:18, :352:24]
wire [7:0] portsAOI_filtered_0_bits_mask = in_0_a_bits_mask; // @[Xbar.scala:159:18, :352:24]
wire [63:0] portsAOI_filtered_0_bits_data = in_0_a_bits_data; // @[Xbar.scala:159:18, :352:24]
wire portsAOI_filtered_0_bits_corrupt = in_0_a_bits_corrupt; // @[Xbar.scala:159:18, :352:24]
wire portsBIO_filtered_0_ready = in_0_b_ready; // @[Xbar.scala:159:18, :352:24]
wire portsBIO_filtered_0_valid; // @[Xbar.scala:352:24]
assign anonIn_b_valid = in_0_b_valid; // @[Xbar.scala:159:18]
wire [2:0] portsBIO_filtered_0_bits_opcode; // @[Xbar.scala:352:24]
assign anonIn_b_bits_opcode = in_0_b_bits_opcode; // @[Xbar.scala:159:18]
wire [1:0] portsBIO_filtered_0_bits_param; // @[Xbar.scala:352:24]
assign anonIn_b_bits_param = in_0_b_bits_param; // @[Xbar.scala:159:18]
wire [3:0] portsBIO_filtered_0_bits_size; // @[Xbar.scala:352:24]
assign anonIn_b_bits_size = in_0_b_bits_size; // @[Xbar.scala:159:18]
wire [2:0] portsBIO_filtered_0_bits_source; // @[Xbar.scala:352:24]
wire [31:0] portsBIO_filtered_0_bits_address; // @[Xbar.scala:352:24]
assign anonIn_b_bits_address = in_0_b_bits_address; // @[Xbar.scala:159:18]
wire [7:0] portsBIO_filtered_0_bits_mask; // @[Xbar.scala:352:24]
assign anonIn_b_bits_mask = in_0_b_bits_mask; // @[Xbar.scala:159:18]
wire [63:0] portsBIO_filtered_0_bits_data; // @[Xbar.scala:352:24]
assign anonIn_b_bits_data = in_0_b_bits_data; // @[Xbar.scala:159:18]
wire portsBIO_filtered_0_bits_corrupt; // @[Xbar.scala:352:24]
assign anonIn_b_bits_corrupt = in_0_b_bits_corrupt; // @[Xbar.scala:159:18]
wire portsCOI_filtered_0_ready; // @[Xbar.scala:352:24]
assign anonIn_c_ready = in_0_c_ready; // @[Xbar.scala:159:18]
wire _portsCOI_filtered_0_valid_T_1 = in_0_c_valid; // @[Xbar.scala:159:18, :355:40]
wire [2:0] portsCOI_filtered_0_bits_opcode = in_0_c_bits_opcode; // @[Xbar.scala:159:18, :352:24]
wire [2:0] portsCOI_filtered_0_bits_param = in_0_c_bits_param; // @[Xbar.scala:159:18, :352:24]
wire [3:0] portsCOI_filtered_0_bits_size = in_0_c_bits_size; // @[Xbar.scala:159:18, :352:24]
wire [2:0] portsCOI_filtered_0_bits_source = in_0_c_bits_source; // @[Xbar.scala:159:18, :352:24]
wire [31:0] _requestCIO_T = in_0_c_bits_address; // @[Xbar.scala:159:18]
wire [31:0] portsCOI_filtered_0_bits_address = in_0_c_bits_address; // @[Xbar.scala:159:18, :352:24]
wire [63:0] portsCOI_filtered_0_bits_data = in_0_c_bits_data; // @[Xbar.scala:159:18, :352:24]
wire portsCOI_filtered_0_bits_corrupt = in_0_c_bits_corrupt; // @[Xbar.scala:159:18, :352:24]
wire portsDIO_filtered_0_ready = in_0_d_ready; // @[Xbar.scala:159:18, :352:24]
wire portsDIO_filtered_0_valid; // @[Xbar.scala:352:24]
assign anonIn_d_valid = in_0_d_valid; // @[Xbar.scala:159:18]
wire [2:0] portsDIO_filtered_0_bits_opcode; // @[Xbar.scala:352:24]
assign anonIn_d_bits_opcode = in_0_d_bits_opcode; // @[Xbar.scala:159:18]
wire [1:0] portsDIO_filtered_0_bits_param; // @[Xbar.scala:352:24]
assign anonIn_d_bits_param = in_0_d_bits_param; // @[Xbar.scala:159:18]
wire [3:0] portsDIO_filtered_0_bits_size; // @[Xbar.scala:352:24]
assign anonIn_d_bits_size = in_0_d_bits_size; // @[Xbar.scala:159:18]
wire [2:0] portsDIO_filtered_0_bits_source; // @[Xbar.scala:352:24]
wire [2:0] portsDIO_filtered_0_bits_sink; // @[Xbar.scala:352:24]
assign anonIn_d_bits_sink = in_0_d_bits_sink; // @[Xbar.scala:159:18]
wire portsDIO_filtered_0_bits_denied; // @[Xbar.scala:352:24]
assign anonIn_d_bits_denied = in_0_d_bits_denied; // @[Xbar.scala:159:18]
wire [63:0] portsDIO_filtered_0_bits_data; // @[Xbar.scala:352:24]
assign anonIn_d_bits_data = in_0_d_bits_data; // @[Xbar.scala:159:18]
wire portsDIO_filtered_0_bits_corrupt; // @[Xbar.scala:352:24]
assign anonIn_d_bits_corrupt = in_0_d_bits_corrupt; // @[Xbar.scala:159:18]
wire portsEOI_filtered_0_ready; // @[Xbar.scala:352:24]
assign anonIn_e_ready = in_0_e_ready; // @[Xbar.scala:159:18]
wire _portsEOI_filtered_0_valid_T_1 = in_0_e_valid; // @[Xbar.scala:159:18, :355:40]
wire [2:0] _requestEIO_uncommonBits_T = in_0_e_bits_sink; // @[Xbar.scala:159:18]
wire portsAOI_filtered_1_0_ready; // @[Xbar.scala:352:24]
wire [2:0] portsEOI_filtered_0_bits_sink = in_0_e_bits_sink; // @[Xbar.scala:159:18, :352:24]
assign anonIn_1_a_ready = in_1_a_ready; // @[Xbar.scala:159:18]
wire _portsAOI_filtered_0_valid_T_3 = in_1_a_valid; // @[Xbar.scala:159:18, :355:40]
wire [2:0] portsAOI_filtered_1_0_bits_opcode = in_1_a_bits_opcode; // @[Xbar.scala:159:18, :352:24]
wire [2:0] portsAOI_filtered_1_0_bits_param = in_1_a_bits_param; // @[Xbar.scala:159:18, :352:24]
wire [2:0] _in_1_a_bits_source_T; // @[Xbar.scala:166:55]
wire [3:0] portsAOI_filtered_1_0_bits_size = in_1_a_bits_size; // @[Xbar.scala:159:18, :352:24]
wire [2:0] portsAOI_filtered_1_0_bits_source = in_1_a_bits_source; // @[Xbar.scala:159:18, :352:24]
wire [31:0] _requestAIO_T_5 = in_1_a_bits_address; // @[Xbar.scala:159:18]
wire [31:0] portsAOI_filtered_1_0_bits_address = in_1_a_bits_address; // @[Xbar.scala:159:18, :352:24]
wire [7:0] portsAOI_filtered_1_0_bits_mask = in_1_a_bits_mask; // @[Xbar.scala:159:18, :352:24]
wire [63:0] portsAOI_filtered_1_0_bits_data = in_1_a_bits_data; // @[Xbar.scala:159:18, :352:24]
wire portsAOI_filtered_1_0_bits_corrupt = in_1_a_bits_corrupt; // @[Xbar.scala:159:18, :352:24]
wire portsDIO_filtered_1_ready = in_1_d_ready; // @[Xbar.scala:159:18, :352:24]
wire portsDIO_filtered_1_valid; // @[Xbar.scala:352:24]
assign anonIn_1_d_valid = in_1_d_valid; // @[Xbar.scala:159:18]
wire [2:0] portsDIO_filtered_1_bits_opcode; // @[Xbar.scala:352:24]
assign anonIn_1_d_bits_opcode = in_1_d_bits_opcode; // @[Xbar.scala:159:18]
wire [1:0] portsDIO_filtered_1_bits_param; // @[Xbar.scala:352:24]
assign anonIn_1_d_bits_param = in_1_d_bits_param; // @[Xbar.scala:159:18]
wire [3:0] portsDIO_filtered_1_bits_size; // @[Xbar.scala:352:24]
assign anonIn_1_d_bits_size = in_1_d_bits_size; // @[Xbar.scala:159:18]
wire [2:0] portsDIO_filtered_1_bits_source; // @[Xbar.scala:352:24]
wire [2:0] portsDIO_filtered_1_bits_sink; // @[Xbar.scala:352:24]
assign anonIn_1_d_bits_sink = in_1_d_bits_sink; // @[Xbar.scala:159:18]
wire portsDIO_filtered_1_bits_denied; // @[Xbar.scala:352:24]
assign anonIn_1_d_bits_denied = in_1_d_bits_denied; // @[Xbar.scala:159:18]
wire [63:0] portsDIO_filtered_1_bits_data; // @[Xbar.scala:352:24]
assign anonIn_1_d_bits_data = in_1_d_bits_data; // @[Xbar.scala:159:18]
wire portsDIO_filtered_1_bits_corrupt; // @[Xbar.scala:352:24]
assign anonIn_1_d_bits_corrupt = in_1_d_bits_corrupt; // @[Xbar.scala:159:18]
wire [2:0] in_0_b_bits_source; // @[Xbar.scala:159:18]
wire [2:0] in_0_d_bits_source; // @[Xbar.scala:159:18]
wire [2:0] in_1_d_bits_source; // @[Xbar.scala:159:18]
assign in_0_a_bits_source = {1'h0, _in_0_a_bits_source_T}; // @[Xbar.scala:159:18, :166:{29,55}]
assign _anonIn_b_bits_source_T = in_0_b_bits_source[1:0]; // @[Xbar.scala:156:69, :159:18]
assign anonIn_b_bits_source = _anonIn_b_bits_source_T; // @[Xbar.scala:156:69]
assign in_0_c_bits_source = {1'h0, _in_0_c_bits_source_T}; // @[Xbar.scala:159:18, :187:{29,55}]
assign _anonIn_d_bits_source_T = in_0_d_bits_source[1:0]; // @[Xbar.scala:156:69, :159:18]
assign anonIn_d_bits_source = _anonIn_d_bits_source_T; // @[Xbar.scala:156:69]
assign _in_1_a_bits_source_T = {2'h2, anonIn_1_a_bits_source}; // @[Xbar.scala:166:55]
assign in_1_a_bits_source = _in_1_a_bits_source_T; // @[Xbar.scala:159:18, :166:55]
wire _out_0_a_valid_T_4; // @[Arbiter.scala:96:24]
assign anonOut_a_valid = out_0_a_valid; // @[Xbar.scala:216:19]
wire [2:0] _out_0_a_bits_WIRE_opcode; // @[Mux.scala:30:73]
assign anonOut_a_bits_opcode = out_0_a_bits_opcode; // @[Xbar.scala:216:19]
wire [2:0] _out_0_a_bits_WIRE_param; // @[Mux.scala:30:73]
assign anonOut_a_bits_param = out_0_a_bits_param; // @[Xbar.scala:216:19]
wire [3:0] _out_0_a_bits_WIRE_size; // @[Mux.scala:30:73]
assign anonOut_a_bits_size = out_0_a_bits_size; // @[Xbar.scala:216:19]
wire [2:0] _out_0_a_bits_WIRE_source; // @[Mux.scala:30:73]
assign anonOut_a_bits_source = out_0_a_bits_source; // @[Xbar.scala:216:19]
wire [31:0] _out_0_a_bits_WIRE_address; // @[Mux.scala:30:73]
assign anonOut_a_bits_address = out_0_a_bits_address; // @[Xbar.scala:216:19]
wire [7:0] _out_0_a_bits_WIRE_mask; // @[Mux.scala:30:73]
assign anonOut_a_bits_mask = out_0_a_bits_mask; // @[Xbar.scala:216:19]
wire [63:0] _out_0_a_bits_WIRE_data; // @[Mux.scala:30:73]
assign anonOut_a_bits_data = out_0_a_bits_data; // @[Xbar.scala:216:19]
wire _out_0_a_bits_WIRE_corrupt; // @[Mux.scala:30:73]
assign anonOut_a_bits_corrupt = out_0_a_bits_corrupt; // @[Xbar.scala:216:19]
wire _portsBIO_out_0_b_ready_WIRE; // @[Mux.scala:30:73]
assign anonOut_b_ready = out_0_b_ready; // @[Xbar.scala:216:19]
assign portsBIO_filtered_0_bits_opcode = out_0_b_bits_opcode; // @[Xbar.scala:216:19, :352:24]
wire [2:0] portsBIO_filtered_1_bits_opcode = out_0_b_bits_opcode; // @[Xbar.scala:216:19, :352:24]
assign portsBIO_filtered_0_bits_param = out_0_b_bits_param; // @[Xbar.scala:216:19, :352:24]
wire [1:0] portsBIO_filtered_1_bits_param = out_0_b_bits_param; // @[Xbar.scala:216:19, :352:24]
assign portsBIO_filtered_0_bits_size = out_0_b_bits_size; // @[Xbar.scala:216:19, :352:24]
wire [3:0] portsBIO_filtered_1_bits_size = out_0_b_bits_size; // @[Xbar.scala:216:19, :352:24]
wire [2:0] _requestBOI_uncommonBits_T = out_0_b_bits_source; // @[Xbar.scala:216:19]
assign portsBIO_filtered_0_bits_source = out_0_b_bits_source; // @[Xbar.scala:216:19, :352:24]
wire [2:0] portsBIO_filtered_1_bits_source = out_0_b_bits_source; // @[Xbar.scala:216:19, :352:24]
assign portsBIO_filtered_0_bits_address = out_0_b_bits_address; // @[Xbar.scala:216:19, :352:24]
wire [31:0] portsBIO_filtered_1_bits_address = out_0_b_bits_address; // @[Xbar.scala:216:19, :352:24]
assign portsBIO_filtered_0_bits_mask = out_0_b_bits_mask; // @[Xbar.scala:216:19, :352:24]
wire [7:0] portsBIO_filtered_1_bits_mask = out_0_b_bits_mask; // @[Xbar.scala:216:19, :352:24]
assign portsBIO_filtered_0_bits_data = out_0_b_bits_data; // @[Xbar.scala:216:19, :352:24]
wire [63:0] portsBIO_filtered_1_bits_data = out_0_b_bits_data; // @[Xbar.scala:216:19, :352:24]
assign portsBIO_filtered_0_bits_corrupt = out_0_b_bits_corrupt; // @[Xbar.scala:216:19, :352:24]
wire portsBIO_filtered_1_bits_corrupt = out_0_b_bits_corrupt; // @[Xbar.scala:216:19, :352:24]
assign portsCOI_filtered_0_ready = out_0_c_ready; // @[Xbar.scala:216:19, :352:24]
wire portsCOI_filtered_0_valid; // @[Xbar.scala:352:24]
assign anonOut_c_valid = out_0_c_valid; // @[Xbar.scala:216:19]
assign anonOut_c_bits_opcode = out_0_c_bits_opcode; // @[Xbar.scala:216:19]
assign anonOut_c_bits_param = out_0_c_bits_param; // @[Xbar.scala:216:19]
assign anonOut_c_bits_size = out_0_c_bits_size; // @[Xbar.scala:216:19]
assign anonOut_c_bits_source = out_0_c_bits_source; // @[Xbar.scala:216:19]
assign anonOut_c_bits_address = out_0_c_bits_address; // @[Xbar.scala:216:19]
assign anonOut_c_bits_data = out_0_c_bits_data; // @[Xbar.scala:216:19]
assign anonOut_c_bits_corrupt = out_0_c_bits_corrupt; // @[Xbar.scala:216:19]
wire _portsDIO_out_0_d_ready_WIRE; // @[Mux.scala:30:73]
assign anonOut_d_ready = out_0_d_ready; // @[Xbar.scala:216:19]
assign portsDIO_filtered_0_bits_opcode = out_0_d_bits_opcode; // @[Xbar.scala:216:19, :352:24]
assign portsDIO_filtered_1_bits_opcode = out_0_d_bits_opcode; // @[Xbar.scala:216:19, :352:24]
assign portsDIO_filtered_0_bits_param = out_0_d_bits_param; // @[Xbar.scala:216:19, :352:24]
assign portsDIO_filtered_1_bits_param = out_0_d_bits_param; // @[Xbar.scala:216:19, :352:24]
assign portsDIO_filtered_0_bits_size = out_0_d_bits_size; // @[Xbar.scala:216:19, :352:24]
assign portsDIO_filtered_1_bits_size = out_0_d_bits_size; // @[Xbar.scala:216:19, :352:24]
wire [2:0] _requestDOI_uncommonBits_T = out_0_d_bits_source; // @[Xbar.scala:216:19]
assign portsDIO_filtered_0_bits_source = out_0_d_bits_source; // @[Xbar.scala:216:19, :352:24]
assign portsDIO_filtered_1_bits_source = out_0_d_bits_source; // @[Xbar.scala:216:19, :352:24]
assign portsDIO_filtered_0_bits_sink = out_0_d_bits_sink; // @[Xbar.scala:216:19, :352:24]
assign portsDIO_filtered_1_bits_sink = out_0_d_bits_sink; // @[Xbar.scala:216:19, :352:24]
assign portsDIO_filtered_0_bits_denied = out_0_d_bits_denied; // @[Xbar.scala:216:19, :352:24]
assign portsDIO_filtered_1_bits_denied = out_0_d_bits_denied; // @[Xbar.scala:216:19, :352:24]
assign portsDIO_filtered_0_bits_data = out_0_d_bits_data; // @[Xbar.scala:216:19, :352:24]
assign portsDIO_filtered_1_bits_data = out_0_d_bits_data; // @[Xbar.scala:216:19, :352:24]
assign portsDIO_filtered_0_bits_corrupt = out_0_d_bits_corrupt; // @[Xbar.scala:216:19, :352:24]
assign portsDIO_filtered_1_bits_corrupt = out_0_d_bits_corrupt; // @[Xbar.scala:216:19, :352:24]
assign portsEOI_filtered_0_ready = out_0_e_ready; // @[Xbar.scala:216:19, :352:24]
wire portsEOI_filtered_0_valid; // @[Xbar.scala:352:24]
assign anonOut_e_valid = out_0_e_valid; // @[Xbar.scala:216:19]
assign _anonOut_e_bits_sink_T = out_0_e_bits_sink; // @[Xbar.scala:156:69, :216:19]
assign out_0_d_bits_sink = _out_0_d_bits_sink_T; // @[Xbar.scala:216:19, :251:53]
assign anonOut_e_bits_sink = _anonOut_e_bits_sink_T; // @[Xbar.scala:156:69]
wire [32:0] _requestAIO_T_1 = {1'h0, _requestAIO_T}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _requestAIO_T_6 = {1'h0, _requestAIO_T_5}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _requestCIO_T_1 = {1'h0, _requestCIO_T}; // @[Parameters.scala:137:{31,41}]
wire [1:0] requestBOI_uncommonBits = _requestBOI_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire _requestBOI_T = out_0_b_bits_source[2]; // @[Xbar.scala:216:19]
wire _requestBOI_T_1 = ~_requestBOI_T; // @[Parameters.scala:54:{10,32}]
wire _requestBOI_T_3 = _requestBOI_T_1; // @[Parameters.scala:54:{32,67}]
wire requestBOI_0_0 = _requestBOI_T_3; // @[Parameters.scala:54:67, :56:48]
wire _portsBIO_filtered_0_valid_T = requestBOI_0_0; // @[Xbar.scala:355:54]
wire requestBOI_0_1 = out_0_b_bits_source == 3'h4; // @[Xbar.scala:216:19]
wire _portsBIO_filtered_1_valid_T = requestBOI_0_1; // @[Xbar.scala:355:54]
wire [1:0] requestDOI_uncommonBits = _requestDOI_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire _requestDOI_T = out_0_d_bits_source[2]; // @[Xbar.scala:216:19]
wire _requestDOI_T_1 = ~_requestDOI_T; // @[Parameters.scala:54:{10,32}]
wire _requestDOI_T_3 = _requestDOI_T_1; // @[Parameters.scala:54:{32,67}]
wire requestDOI_0_0 = _requestDOI_T_3; // @[Parameters.scala:54:67, :56:48]
wire _portsDIO_filtered_0_valid_T = requestDOI_0_0; // @[Xbar.scala:355:54]
wire requestDOI_0_1 = out_0_d_bits_source == 3'h4; // @[Xbar.scala:216:19]
wire _portsDIO_filtered_1_valid_T = requestDOI_0_1; // @[Xbar.scala:355:54]
wire [2:0] requestEIO_uncommonBits = _requestEIO_uncommonBits_T; // @[Parameters.scala:52:{29,56}]
wire [26:0] _beatsAI_decode_T = 27'hFFF << in_0_a_bits_size; // @[package.scala:243:71]
wire [11:0] _beatsAI_decode_T_1 = _beatsAI_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _beatsAI_decode_T_2 = ~_beatsAI_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] beatsAI_decode = _beatsAI_decode_T_2[11:3]; // @[package.scala:243:46]
wire _beatsAI_opdata_T = in_0_a_bits_opcode[2]; // @[Xbar.scala:159:18]
wire beatsAI_opdata = ~_beatsAI_opdata_T; // @[Edges.scala:92:{28,37}]
wire [8:0] beatsAI_0 = beatsAI_opdata ? beatsAI_decode : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14]
wire [26:0] _beatsAI_decode_T_3 = 27'hFFF << in_1_a_bits_size; // @[package.scala:243:71]
wire [11:0] _beatsAI_decode_T_4 = _beatsAI_decode_T_3[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _beatsAI_decode_T_5 = ~_beatsAI_decode_T_4; // @[package.scala:243:{46,76}]
wire [8:0] beatsAI_decode_1 = _beatsAI_decode_T_5[11:3]; // @[package.scala:243:46]
wire _beatsAI_opdata_T_1 = in_1_a_bits_opcode[2]; // @[Xbar.scala:159:18]
wire beatsAI_opdata_1 = ~_beatsAI_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [8:0] beatsAI_1 = beatsAI_opdata_1 ? beatsAI_decode_1 : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14]
wire [26:0] _beatsBO_decode_T = 27'hFFF << out_0_b_bits_size; // @[package.scala:243:71]
wire [11:0] _beatsBO_decode_T_1 = _beatsBO_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _beatsBO_decode_T_2 = ~_beatsBO_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] beatsBO_decode = _beatsBO_decode_T_2[11:3]; // @[package.scala:243:46]
wire _beatsBO_opdata_T = out_0_b_bits_opcode[2]; // @[Xbar.scala:216:19]
wire beatsBO_opdata = ~_beatsBO_opdata_T; // @[Edges.scala:97:{28,37}]
wire [26:0] _beatsCI_decode_T = 27'hFFF << in_0_c_bits_size; // @[package.scala:243:71]
wire [11:0] _beatsCI_decode_T_1 = _beatsCI_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _beatsCI_decode_T_2 = ~_beatsCI_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] beatsCI_decode = _beatsCI_decode_T_2[11:3]; // @[package.scala:243:46]
wire beatsCI_opdata = in_0_c_bits_opcode[0]; // @[Xbar.scala:159:18]
wire [8:0] beatsCI_0 = beatsCI_opdata ? beatsCI_decode : 9'h0; // @[Edges.scala:102:36, :220:59, :221:14]
wire [26:0] _beatsDO_decode_T = 27'hFFF << out_0_d_bits_size; // @[package.scala:243:71]
wire [11:0] _beatsDO_decode_T_1 = _beatsDO_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _beatsDO_decode_T_2 = ~_beatsDO_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] beatsDO_decode = _beatsDO_decode_T_2[11:3]; // @[package.scala:243:46]
wire beatsDO_opdata = out_0_d_bits_opcode[0]; // @[Xbar.scala:216:19]
wire [8:0] beatsDO_0 = beatsDO_opdata ? beatsDO_decode : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
wire _filtered_0_ready_T; // @[Arbiter.scala:94:31]
assign in_0_a_ready = portsAOI_filtered_0_ready; // @[Xbar.scala:159:18, :352:24]
wire portsAOI_filtered_0_valid; // @[Xbar.scala:352:24]
assign portsAOI_filtered_0_valid = _portsAOI_filtered_0_valid_T_1; // @[Xbar.scala:352:24, :355:40]
wire _filtered_0_ready_T_1; // @[Arbiter.scala:94:31]
assign in_1_a_ready = portsAOI_filtered_1_0_ready; // @[Xbar.scala:159:18, :352:24]
wire portsAOI_filtered_1_0_valid; // @[Xbar.scala:352:24]
assign portsAOI_filtered_1_0_valid = _portsAOI_filtered_0_valid_T_3; // @[Xbar.scala:352:24, :355:40]
wire _portsBIO_filtered_0_valid_T_1; // @[Xbar.scala:355:40]
assign in_0_b_valid = portsBIO_filtered_0_valid; // @[Xbar.scala:159:18, :352:24]
assign in_0_b_bits_opcode = portsBIO_filtered_0_bits_opcode; // @[Xbar.scala:159:18, :352:24]
assign in_0_b_bits_param = portsBIO_filtered_0_bits_param; // @[Xbar.scala:159:18, :352:24]
assign in_0_b_bits_size = portsBIO_filtered_0_bits_size; // @[Xbar.scala:159:18, :352:24]
assign in_0_b_bits_source = portsBIO_filtered_0_bits_source; // @[Xbar.scala:159:18, :352:24]
assign in_0_b_bits_address = portsBIO_filtered_0_bits_address; // @[Xbar.scala:159:18, :352:24]
assign in_0_b_bits_mask = portsBIO_filtered_0_bits_mask; // @[Xbar.scala:159:18, :352:24]
assign in_0_b_bits_data = portsBIO_filtered_0_bits_data; // @[Xbar.scala:159:18, :352:24]
assign in_0_b_bits_corrupt = portsBIO_filtered_0_bits_corrupt; // @[Xbar.scala:159:18, :352:24]
wire _portsBIO_filtered_1_valid_T_1; // @[Xbar.scala:355:40]
wire portsBIO_filtered_1_valid; // @[Xbar.scala:352:24]
assign _portsBIO_filtered_0_valid_T_1 = out_0_b_valid & _portsBIO_filtered_0_valid_T; // @[Xbar.scala:216:19, :355:{40,54}]
assign portsBIO_filtered_0_valid = _portsBIO_filtered_0_valid_T_1; // @[Xbar.scala:352:24, :355:40]
assign _portsBIO_filtered_1_valid_T_1 = out_0_b_valid & _portsBIO_filtered_1_valid_T; // @[Xbar.scala:216:19, :355:{40,54}]
assign portsBIO_filtered_1_valid = _portsBIO_filtered_1_valid_T_1; // @[Xbar.scala:352:24, :355:40]
wire _portsBIO_out_0_b_ready_T = requestBOI_0_0 & portsBIO_filtered_0_ready; // @[Mux.scala:30:73]
wire _portsBIO_out_0_b_ready_T_2 = _portsBIO_out_0_b_ready_T; // @[Mux.scala:30:73]
assign _portsBIO_out_0_b_ready_WIRE = _portsBIO_out_0_b_ready_T_2; // @[Mux.scala:30:73]
assign out_0_b_ready = _portsBIO_out_0_b_ready_WIRE; // @[Mux.scala:30:73]
assign in_0_c_ready = portsCOI_filtered_0_ready; // @[Xbar.scala:159:18, :352:24]
assign out_0_c_valid = portsCOI_filtered_0_valid; // @[Xbar.scala:216:19, :352:24]
assign out_0_c_bits_opcode = portsCOI_filtered_0_bits_opcode; // @[Xbar.scala:216:19, :352:24]
assign out_0_c_bits_param = portsCOI_filtered_0_bits_param; // @[Xbar.scala:216:19, :352:24]
assign out_0_c_bits_size = portsCOI_filtered_0_bits_size; // @[Xbar.scala:216:19, :352:24]
assign out_0_c_bits_source = portsCOI_filtered_0_bits_source; // @[Xbar.scala:216:19, :352:24]
assign out_0_c_bits_address = portsCOI_filtered_0_bits_address; // @[Xbar.scala:216:19, :352:24]
assign out_0_c_bits_data = portsCOI_filtered_0_bits_data; // @[Xbar.scala:216:19, :352:24]
assign out_0_c_bits_corrupt = portsCOI_filtered_0_bits_corrupt; // @[Xbar.scala:216:19, :352:24]
assign portsCOI_filtered_0_valid = _portsCOI_filtered_0_valid_T_1; // @[Xbar.scala:352:24, :355:40]
wire _portsDIO_filtered_0_valid_T_1; // @[Xbar.scala:355:40]
assign in_0_d_valid = portsDIO_filtered_0_valid; // @[Xbar.scala:159:18, :352:24]
assign in_0_d_bits_opcode = portsDIO_filtered_0_bits_opcode; // @[Xbar.scala:159:18, :352:24]
assign in_0_d_bits_param = portsDIO_filtered_0_bits_param; // @[Xbar.scala:159:18, :352:24]
assign in_0_d_bits_size = portsDIO_filtered_0_bits_size; // @[Xbar.scala:159:18, :352:24]
assign in_0_d_bits_source = portsDIO_filtered_0_bits_source; // @[Xbar.scala:159:18, :352:24]
assign in_0_d_bits_sink = portsDIO_filtered_0_bits_sink; // @[Xbar.scala:159:18, :352:24]
assign in_0_d_bits_denied = portsDIO_filtered_0_bits_denied; // @[Xbar.scala:159:18, :352:24]
assign in_0_d_bits_data = portsDIO_filtered_0_bits_data; // @[Xbar.scala:159:18, :352:24]
assign in_0_d_bits_corrupt = portsDIO_filtered_0_bits_corrupt; // @[Xbar.scala:159:18, :352:24]
wire _portsDIO_filtered_1_valid_T_1; // @[Xbar.scala:355:40]
assign in_1_d_valid = portsDIO_filtered_1_valid; // @[Xbar.scala:159:18, :352:24]
assign in_1_d_bits_opcode = portsDIO_filtered_1_bits_opcode; // @[Xbar.scala:159:18, :352:24]
assign in_1_d_bits_param = portsDIO_filtered_1_bits_param; // @[Xbar.scala:159:18, :352:24]
assign in_1_d_bits_size = portsDIO_filtered_1_bits_size; // @[Xbar.scala:159:18, :352:24]
assign in_1_d_bits_source = portsDIO_filtered_1_bits_source; // @[Xbar.scala:159:18, :352:24]
assign in_1_d_bits_sink = portsDIO_filtered_1_bits_sink; // @[Xbar.scala:159:18, :352:24]
assign in_1_d_bits_denied = portsDIO_filtered_1_bits_denied; // @[Xbar.scala:159:18, :352:24]
assign in_1_d_bits_data = portsDIO_filtered_1_bits_data; // @[Xbar.scala:159:18, :352:24]
assign in_1_d_bits_corrupt = portsDIO_filtered_1_bits_corrupt; // @[Xbar.scala:159:18, :352:24]
assign _portsDIO_filtered_0_valid_T_1 = out_0_d_valid & _portsDIO_filtered_0_valid_T; // @[Xbar.scala:216:19, :355:{40,54}]
assign portsDIO_filtered_0_valid = _portsDIO_filtered_0_valid_T_1; // @[Xbar.scala:352:24, :355:40]
assign _portsDIO_filtered_1_valid_T_1 = out_0_d_valid & _portsDIO_filtered_1_valid_T; // @[Xbar.scala:216:19, :355:{40,54}]
assign portsDIO_filtered_1_valid = _portsDIO_filtered_1_valid_T_1; // @[Xbar.scala:352:24, :355:40]
wire _portsDIO_out_0_d_ready_T = requestDOI_0_0 & portsDIO_filtered_0_ready; // @[Mux.scala:30:73]
wire _portsDIO_out_0_d_ready_T_1 = requestDOI_0_1 & portsDIO_filtered_1_ready; // @[Mux.scala:30:73]
wire _portsDIO_out_0_d_ready_T_2 = _portsDIO_out_0_d_ready_T | _portsDIO_out_0_d_ready_T_1; // @[Mux.scala:30:73]
assign _portsDIO_out_0_d_ready_WIRE = _portsDIO_out_0_d_ready_T_2; // @[Mux.scala:30:73]
assign out_0_d_ready = _portsDIO_out_0_d_ready_WIRE; // @[Mux.scala:30:73]
assign in_0_e_ready = portsEOI_filtered_0_ready; // @[Xbar.scala:159:18, :352:24]
assign out_0_e_valid = portsEOI_filtered_0_valid; // @[Xbar.scala:216:19, :352:24]
assign out_0_e_bits_sink = portsEOI_filtered_0_bits_sink; // @[Xbar.scala:216:19, :352:24]
assign portsEOI_filtered_0_valid = _portsEOI_filtered_0_valid_T_1; // @[Xbar.scala:352:24, :355:40]
reg [8:0] beatsLeft; // @[Arbiter.scala:60:30]
wire idle = beatsLeft == 9'h0; // @[Arbiter.scala:60:30, :61:28]
wire latch = idle & out_0_a_ready; // @[Xbar.scala:216:19]
wire [1:0] _readys_T = {portsAOI_filtered_1_0_valid, portsAOI_filtered_0_valid}; // @[Xbar.scala:352:24]
wire [1:0] readys_valid = _readys_T; // @[Arbiter.scala:21:23, :68:51]
wire _readys_T_1 = readys_valid == _readys_T; // @[Arbiter.scala:21:23, :22:19, :68:51]
wire _readys_T_3 = ~_readys_T_2; // @[Arbiter.scala:22:12]
wire _readys_T_4 = ~_readys_T_1; // @[Arbiter.scala:22:{12,19}]
reg [1:0] readys_mask; // @[Arbiter.scala:23:23]
wire [1:0] _readys_filter_T = ~readys_mask; // @[Arbiter.scala:23:23, :24:30]
wire [1:0] _readys_filter_T_1 = readys_valid & _readys_filter_T; // @[Arbiter.scala:21:23, :24:{28,30}]
wire [3:0] readys_filter = {_readys_filter_T_1, readys_valid}; // @[Arbiter.scala:21:23, :24:{21,28}]
wire [2:0] _readys_unready_T = readys_filter[3:1]; // @[package.scala:262:48]
wire [3:0] _readys_unready_T_1 = {readys_filter[3], readys_filter[2:0] | _readys_unready_T}; // @[package.scala:262:{43,48}]
wire [3:0] _readys_unready_T_2 = _readys_unready_T_1; // @[package.scala:262:43, :263:17]
wire [2:0] _readys_unready_T_3 = _readys_unready_T_2[3:1]; // @[package.scala:263:17]
wire [3:0] _readys_unready_T_4 = {readys_mask, 2'h0}; // @[Arbiter.scala:23:23, :25:66]
wire [3:0] readys_unready = {1'h0, _readys_unready_T_3} | _readys_unready_T_4; // @[Arbiter.scala:25:{52,58,66}]
wire [1:0] _readys_readys_T = readys_unready[3:2]; // @[Arbiter.scala:25:58, :26:29]
wire [1:0] _readys_readys_T_1 = readys_unready[1:0]; // @[Arbiter.scala:25:58, :26:48]
wire [1:0] _readys_readys_T_2 = _readys_readys_T & _readys_readys_T_1; // @[Arbiter.scala:26:{29,39,48}]
wire [1:0] readys_readys = ~_readys_readys_T_2; // @[Arbiter.scala:26:{18,39}]
wire [1:0] _readys_T_7 = readys_readys; // @[Arbiter.scala:26:18, :30:11]
wire _readys_T_5 = |readys_valid; // @[Arbiter.scala:21:23, :27:27]
wire _readys_T_6 = latch & _readys_T_5; // @[Arbiter.scala:27:{18,27}, :62:24]
wire [1:0] _readys_mask_T = readys_readys & readys_valid; // @[Arbiter.scala:21:23, :26:18, :28:29]
wire [2:0] _readys_mask_T_1 = {_readys_mask_T, 1'h0}; // @[package.scala:253:48]
wire [1:0] _readys_mask_T_2 = _readys_mask_T_1[1:0]; // @[package.scala:253:{48,53}]
wire [1:0] _readys_mask_T_3 = _readys_mask_T | _readys_mask_T_2; // @[package.scala:253:{43,53}]
wire [1:0] _readys_mask_T_4 = _readys_mask_T_3; // @[package.scala:253:43, :254:17]
wire _readys_T_8 = _readys_T_7[0]; // @[Arbiter.scala:30:11, :68:76]
wire readys_0 = _readys_T_8; // @[Arbiter.scala:68:{27,76}]
wire _readys_T_9 = _readys_T_7[1]; // @[Arbiter.scala:30:11, :68:76]
wire readys_1 = _readys_T_9; // @[Arbiter.scala:68:{27,76}]
wire _winner_T = readys_0 & portsAOI_filtered_0_valid; // @[Xbar.scala:352:24]
wire winner_0 = _winner_T; // @[Arbiter.scala:71:{27,69}]
wire _winner_T_1 = readys_1 & portsAOI_filtered_1_0_valid; // @[Xbar.scala:352:24]
wire winner_1 = _winner_T_1; // @[Arbiter.scala:71:{27,69}]
wire prefixOR_1 = winner_0; // @[Arbiter.scala:71:27, :76:48]
wire _prefixOR_T = prefixOR_1 | winner_1; // @[Arbiter.scala:71:27, :76:48]
wire _out_0_a_valid_T = portsAOI_filtered_0_valid | portsAOI_filtered_1_0_valid; // @[Xbar.scala:352:24] |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftRegisterPriorityQueue.scala:
package compressacc
import chisel3._
import chisel3.util._
// TODO : support enq & deq at the same cycle
class PriorityQueueStageIO(keyWidth: Int, value: ValueInfo) extends Bundle {
val output_prev = KeyValue(keyWidth, value)
val output_nxt = KeyValue(keyWidth, value)
val input_prev = Flipped(KeyValue(keyWidth, value))
val input_nxt = Flipped(KeyValue(keyWidth, value))
val cmd = Flipped(Valid(UInt(1.W)))
val insert_here = Input(Bool())
val cur_input_keyval = Flipped(KeyValue(keyWidth, value))
val cur_output_keyval = KeyValue(keyWidth, value)
}
class PriorityQueueStage(keyWidth: Int, value: ValueInfo) extends Module {
val io = IO(new PriorityQueueStageIO(keyWidth, value))
dontTouch(io)
val CMD_DEQ = 0.U
val CMD_ENQ = 1.U
val MAX_VALUE = (1 << keyWidth) - 1
val key_reg = RegInit(MAX_VALUE.U(keyWidth.W))
val value_reg = Reg(value)
io.output_prev.key := key_reg
io.output_prev.value := value_reg
io.output_nxt.key := key_reg
io.output_nxt.value := value_reg
io.cur_output_keyval.key := key_reg
io.cur_output_keyval.value := value_reg
when (io.cmd.valid) {
switch (io.cmd.bits) {
is (CMD_DEQ) {
key_reg := io.input_nxt.key
value_reg := io.input_nxt.value
}
is (CMD_ENQ) {
when (io.insert_here) {
key_reg := io.cur_input_keyval.key
value_reg := io.cur_input_keyval.value
} .elsewhen (key_reg >= io.cur_input_keyval.key) {
key_reg := io.input_prev.key
value_reg := io.input_prev.value
} .otherwise {
// do nothing
}
}
}
}
}
object PriorityQueueStage {
def apply(keyWidth: Int, v: ValueInfo): PriorityQueueStage = new PriorityQueueStage(keyWidth, v)
}
// TODO
// - This design is not scalable, as the enqueued key/value is broadcast to all the stages
// - Add pipeline registers later
class PriorityQueueIO(queSize: Int, keyWidth: Int, value: ValueInfo) extends Bundle {
val cnt_bits = log2Ceil(queSize+1)
val counter = Output(UInt(cnt_bits.W))
val enq = Flipped(Decoupled(KeyValue(keyWidth, value)))
val deq = Decoupled(KeyValue(keyWidth, value))
}
class PriorityQueue(queSize: Int, keyWidth: Int, value: ValueInfo) extends Module {
val keyWidthInternal = keyWidth + 1
val CMD_DEQ = 0.U
val CMD_ENQ = 1.U
val io = IO(new PriorityQueueIO(queSize, keyWidthInternal, value))
dontTouch(io)
val MAX_VALUE = ((1 << keyWidthInternal) - 1).U
val cnt_bits = log2Ceil(queSize+1)
  // do not consider cases where we are inserting more entries than queSize
val counter = RegInit(0.U(cnt_bits.W))
io.counter := counter
val full = (counter === queSize.U)
val empty = (counter === 0.U)
io.deq.valid := !empty
io.enq.ready := !full
when (io.enq.fire) {
counter := counter + 1.U
}
when (io.deq.fire) {
counter := counter - 1.U
}
val cmd_valid = io.enq.valid || io.deq.ready
val cmd = Mux(io.enq.valid, CMD_ENQ, CMD_DEQ)
assert(!(io.enq.valid && io.deq.ready))
val stages = Seq.fill(queSize)(Module(new PriorityQueueStage(keyWidthInternal, value)))
for (i <- 0 until (queSize - 1)) {
stages(i+1).io.input_prev <> stages(i).io.output_nxt
stages(i).io.input_nxt <> stages(i+1).io.output_prev
}
stages(queSize-1).io.input_nxt.key := MAX_VALUE
// stages(queSize-1).io.input_nxt.value :=
stages(queSize-1).io.input_nxt.value.symbol := 0.U
// stages(queSize-1).io.input_nxt.value.child(0) := 0.U
// stages(queSize-1).io.input_nxt.value.child(1) := 0.U
stages(0).io.input_prev.key := io.enq.bits.key
stages(0).io.input_prev.value <> io.enq.bits.value
for (i <- 0 until queSize) {
stages(i).io.cmd.valid := cmd_valid
stages(i).io.cmd.bits := cmd
stages(i).io.cur_input_keyval <> io.enq.bits
}
val is_large_or_equal = WireInit(VecInit(Seq.fill(queSize)(false.B)))
for (i <- 0 until queSize) {
is_large_or_equal(i) := (stages(i).io.cur_output_keyval.key >= io.enq.bits.key)
}
val is_large_or_equal_cat = Wire(UInt(queSize.W))
is_large_or_equal_cat := Cat(is_large_or_equal.reverse)
val insert_here_idx = PriorityEncoder(is_large_or_equal_cat)
for (i <- 0 until queSize) {
when (i.U === insert_here_idx) {
stages(i).io.insert_here := true.B
} .otherwise {
stages(i).io.insert_here := false.B
}
}
io.deq.bits <> stages(0).io.output_prev
}
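// ------------------------------------------------------------------------------------------
// Usage sketch (not part of the original source, added for illustration only). ValueInfo and
// KeyValue are defined elsewhere in compressacc; from their use above and from the generated
// Verilog below (a 31-bit key and a 10-bit `symbol` payload), ValueInfo is presumably a Bundle
// carrying at least `val symbol = UInt(10.W)`. Under that assumption, the queue could be
// elaborated roughly as follows (names are illustrative only):
//
//   object EmitPriorityQueue extends App {
//     // keyWidth = 30 becomes keyWidthInternal = 31 inside PriorityQueue
//     circt.stage.ChiselStage.emitSystemVerilog(
//       new PriorityQueue(queSize = 8, keyWidth = 30, value = new ValueInfo))
//   }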
| module PriorityQueueStage_167( // @[ShiftRegisterPriorityQueue.scala:21:7]
input clock, // @[ShiftRegisterPriorityQueue.scala:21:7]
input reset, // @[ShiftRegisterPriorityQueue.scala:21:7]
output [30:0] io_output_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [9:0] io_output_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [30:0] io_output_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [9:0] io_output_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [30:0] io_input_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [9:0] io_input_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [30:0] io_input_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [9:0] io_input_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
input io_cmd_valid, // @[ShiftRegisterPriorityQueue.scala:22:14]
input io_cmd_bits, // @[ShiftRegisterPriorityQueue.scala:22:14]
input io_insert_here, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [30:0] io_cur_input_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [9:0] io_cur_input_keyval_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [30:0] io_cur_output_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [9:0] io_cur_output_keyval_value_symbol // @[ShiftRegisterPriorityQueue.scala:22:14]
);
wire [30:0] io_input_prev_key_0 = io_input_prev_key; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_input_prev_value_symbol_0 = io_input_prev_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_input_nxt_key_0 = io_input_nxt_key; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_input_nxt_value_symbol_0 = io_input_nxt_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire io_cmd_valid_0 = io_cmd_valid; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire io_cmd_bits_0 = io_cmd_bits; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire io_insert_here_0 = io_insert_here; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_cur_input_keyval_key_0 = io_cur_input_keyval_key; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_cur_input_keyval_value_symbol_0 = io_cur_input_keyval_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
reg [30:0] key_reg; // @[ShiftRegisterPriorityQueue.scala:30:24]
assign io_output_prev_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
assign io_output_nxt_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
assign io_cur_output_keyval_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
reg [9:0] value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:31:22]
assign io_output_prev_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
assign io_output_nxt_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
assign io_cur_output_keyval_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
wire _T_2 = key_reg >= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24, :52:30]
always @(posedge clock) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (reset) // @[ShiftRegisterPriorityQueue.scala:21:7]
key_reg <= 31'h7FFFFFFF; // @[ShiftRegisterPriorityQueue.scala:30:24]
else if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7]
key_reg <= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30]
key_reg <= io_input_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
end
else // @[ShiftRegisterPriorityQueue.scala:21:7]
key_reg <= io_input_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
end
if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7]
value_reg_symbol <= io_cur_input_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30]
value_reg_symbol <= io_input_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
end
else // @[ShiftRegisterPriorityQueue.scala:21:7]
value_reg_symbol <= io_input_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
end
  end // always @(posedge)
assign io_output_prev_key = io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_output_prev_value_symbol = io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_output_nxt_key = io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_output_nxt_value_symbol = io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_cur_output_keyval_key = io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_cur_output_keyval_value_symbol = io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
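// Illustrative sketch (not part of the original file): a minimal client-side LazyModule
// declaring a TLClientNode with the v1 parameter helpers. The module name, the sourceId range,
// and the imports of LazyModule/LazyModuleImp/IdRange are assumptions for the example.
//
//   class ExampleMaster(implicit p: Parameters) extends LazyModule {
//     val node = TLClientNode(Seq(TLMasterPortParameters.v1(
//       Seq(TLMasterParameters.v1(name = "example-master", sourceId = IdRange(0, 4))))))
//     lazy val module = new LazyModuleImp(this) {
//       val (tl, edge) = node.out(0) // hardware TLBundle plus its TLEdgeOut
//     }
//   }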
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
File RegisterRouter.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.diplomacy.{AddressSet, TransferSizes}
import freechips.rocketchip.resources.{Device, Resource, ResourceBindings}
import freechips.rocketchip.prci.{NoCrossing}
import freechips.rocketchip.regmapper.{RegField, RegMapper, RegMapperParams, RegMapperInput, RegisterRouter}
import freechips.rocketchip.util.{BundleField, ControlKey, ElaborationArtefacts, GenRegDescsAnno}
import scala.math.min
class TLRegisterRouterExtraBundle(val sourceBits: Int, val sizeBits: Int) extends Bundle {
val source = UInt((sourceBits max 1).W)
val size = UInt((sizeBits max 1).W)
}
case object TLRegisterRouterExtra extends ControlKey[TLRegisterRouterExtraBundle]("tlrr_extra")
case class TLRegisterRouterExtraField(sourceBits: Int, sizeBits: Int) extends BundleField[TLRegisterRouterExtraBundle](TLRegisterRouterExtra, Output(new TLRegisterRouterExtraBundle(sourceBits, sizeBits)), x => {
x.size := 0.U
x.source := 0.U
})
/** TLRegisterNode is a specialized TL SinkNode that encapsulates MMIO registers.
  * It provides functionality for describing and outputting metadata about the registers in several formats.
* It also provides a concrete implementation of a regmap function that will be used
* to wire a map of internal registers associated with this node to the node's interconnect port.
*/
case class TLRegisterNode(
address: Seq[AddressSet],
device: Device,
deviceKey: String = "reg/control",
concurrency: Int = 0,
beatBytes: Int = 4,
undefZero: Boolean = true,
executable: Boolean = false)(
implicit valName: ValName)
extends SinkNode(TLImp)(Seq(TLSlavePortParameters.v1(
Seq(TLSlaveParameters.v1(
address = address,
resources = Seq(Resource(device, deviceKey)),
executable = executable,
supportsGet = TransferSizes(1, beatBytes),
supportsPutPartial = TransferSizes(1, beatBytes),
supportsPutFull = TransferSizes(1, beatBytes),
fifoId = Some(0))), // requests are handled in order
beatBytes = beatBytes,
minLatency = min(concurrency, 1)))) with TLFormatNode // the Queue adds at most one cycle
{
val size = 1 << log2Ceil(1 + address.map(_.max).max - address.map(_.base).min)
require (size >= beatBytes)
address.foreach { case a =>
require (a.widen(size-1).base == address.head.widen(size-1).base,
s"TLRegisterNode addresses (${address}) must be aligned to its size ${size}")
}
// Calling this method causes the matching TL2 bundle to be
// configured to route all requests to the listed RegFields.
def regmap(mapping: RegField.Map*) = {
val (bundleIn, edge) = this.in(0)
val a = bundleIn.a
val d = bundleIn.d
val fields = TLRegisterRouterExtraField(edge.bundle.sourceBits, edge.bundle.sizeBits) +: a.bits.params.echoFields
val params = RegMapperParams(log2Up(size/beatBytes), beatBytes, fields)
val in = Wire(Decoupled(new RegMapperInput(params)))
in.bits.read := a.bits.opcode === TLMessages.Get
in.bits.index := edge.addr_hi(a.bits)
in.bits.data := a.bits.data
in.bits.mask := a.bits.mask
Connectable.waiveUnmatched(in.bits.extra, a.bits.echo) match {
case (lhs, rhs) => lhs :<= rhs
}
val a_extra = in.bits.extra(TLRegisterRouterExtra)
a_extra.source := a.bits.source
a_extra.size := a.bits.size
// Invoke the register map builder
val out = RegMapper(beatBytes, concurrency, undefZero, in, mapping:_*)
// No flow control needed
in.valid := a.valid
a.ready := in.ready
d.valid := out.valid
out.ready := d.ready
// We must restore the size to enable width adapters to work
val d_extra = out.bits.extra(TLRegisterRouterExtra)
d.bits := edge.AccessAck(toSource = d_extra.source, lgSize = d_extra.size)
// avoid a Mux on the data bus by manually overriding two fields
d.bits.data := out.bits.data
Connectable.waiveUnmatched(d.bits.echo, out.bits.extra) match {
case (lhs, rhs) => lhs :<= rhs
}
d.bits.opcode := Mux(out.bits.read, TLMessages.AccessAckData, TLMessages.AccessAck)
// Tie off unused channels
bundleIn.b.valid := false.B
bundleIn.c.ready := true.B
bundleIn.e.ready := true.B
genRegDescsJson(mapping:_*)
}
def genRegDescsJson(mapping: RegField.Map*): Unit = {
// Dump out the register map for documentation purposes.
val base = address.head.base
val baseHex = s"0x${base.toInt.toHexString}"
val name = s"${device.describe(ResourceBindings()).name}.At${baseHex}"
val json = GenRegDescsAnno.serialize(base, name, mapping:_*)
var suffix = 0
while( ElaborationArtefacts.contains(s"${baseHex}.${suffix}.regmap.json")) {
suffix = suffix + 1
}
ElaborationArtefacts.add(s"${baseHex}.${suffix}.regmap.json", json)
val module = Module.currentModule.get.asInstanceOf[RawModule]
GenRegDescsAnno.anno(
module,
base,
mapping:_*)
}
}
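// Illustrative sketch (not part of the original file): wiring two registers into a
// TLRegisterNode via regmap. RegField and RegField.r come from freechips.rocketchip.regmapper
// (imported above); the address, device, and the `status` signal are assumptions.
//
//   val ctrlNode = TLRegisterNode(Seq(AddressSet(0x10000000L, 0xfff)), device, beatBytes = 8)
//   ...
//   val scratch = RegInit(0.U(32.W))
//   ctrlNode.regmap(
//     0x00 -> Seq(RegField(32, scratch)),  // offset 0x00: read/write scratch register
//     0x04 -> Seq(RegField.r(32, status))  // offset 0x04: read-only status word
//   )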
/** Mix HasTLControlRegMap into any subclass of RegisterRouter to gain helper functions for attaching a device control register map to TileLink.
* - The intended use case is that controlNode will diplomatically publish a SW-visible device's memory-mapped control registers.
* - Use the clock crossing helper controlXing to externally connect controlNode to a TileLink interconnect.
* - Use the mapping helper function regmap to internally fill out the space of device control registers.
*/
trait HasTLControlRegMap { this: RegisterRouter =>
protected val controlNode = TLRegisterNode(
address = address,
device = device,
deviceKey = "reg/control",
concurrency = concurrency,
beatBytes = beatBytes,
undefZero = undefZero,
executable = executable)
// Externally, this helper should be used to connect the register control port to a bus
val controlXing: TLInwardClockCrossingHelper = this.crossIn(controlNode)
// Backwards-compatibility default node accessor with no clock crossing
lazy val node: TLInwardNode = controlXing(NoCrossing)
// Internally, this function should be used to populate the control port with registers
protected def regmap(mapping: RegField.Map*): Unit = { controlNode.regmap(mapping:_*) }
}
File TileResetSetter.scala:
package chipyard.clocking
import chisel3._
import chisel3.util._
import chisel3.experimental.Analog
import org.chipsalliance.cde.config._
import freechips.rocketchip.subsystem._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.prci._
import freechips.rocketchip.util._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.devices.tilelink._
import freechips.rocketchip.regmapper._
import freechips.rocketchip.subsystem._
// Currently only works if all tiles are already driven by independent clock groups
// TODO: After https://github.com/chipsalliance/rocket-chip/pull/2842 is merged, we should
// always put all tiles on independent clock groups
class TileResetSetter(address: BigInt, beatBytes: Int, tileNames: Seq[String], initResetHarts: Seq[Int])(implicit p: Parameters)
extends LazyModule {
val device = new SimpleDevice("tile-reset-setter", Nil)
val tlNode = TLRegisterNode(Seq(AddressSet(address, 4096-1)), device, "reg/control", beatBytes=beatBytes)
val clockNode = ClockGroupIdentityNode()
lazy val module = new LazyModuleImp(this) {
val nTiles = p(TilesLocated(InSubsystem)).size
require (nTiles <= 4096 / 4)
val tile_async_resets = Wire(Vec(nTiles, Reset()))
val r_tile_resets = (0 until nTiles).map({ i =>
tile_async_resets(i) := true.B.asAsyncReset // Remove this line after https://github.com/chipsalliance/rocket-chip/pull/2842
withReset (tile_async_resets(i)) {
Module(new AsyncResetRegVec(w=1, init=(if (initResetHarts.contains(i)) 1 else 0)))
}
})
if (nTiles > 0)
tlNode.regmap((0 until nTiles).map({ i =>
i * 4 -> Seq(RegField.rwReg(1, r_tile_resets(i).io))
}): _*)
val tileMap = tileNames.zipWithIndex.map({ case (n, i) =>
n -> (tile_async_resets(i), r_tile_resets(i).io.q, address + i * 4)
})
(clockNode.out zip clockNode.in).map { case ((o, _), (i, _)) =>
(o.member.elements zip i.member.elements).foreach { case ((name, oD), (_, iD)) =>
oD.clock := iD.clock
oD.reset := iD.reset
for ((n, (rIn, rOut, addr)) <- tileMap) {
if (name.contains(n)) {
println(s"${addr.toString(16)}: Tile $name reset control")
// Async because the reset coming out of the AsyncResetRegVec is
// clocked to the bus this is attached to, not the clock in this
// clock bundle. We expect a ClockGroupResetSynchronizer downstream
// to synchronize the resets
          // Also, this OR enforces that the tiles come out of reset after the reset of the system
oD.reset := (rOut.asBool || iD.reset.asBool).asAsyncReset
rIn := iD.reset
}
}
}
}
}
}
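// Illustrative sketch (not part of the original file): how this setter is typically created
// and attached to a control bus. The base address, the bus handle `cbus`, and the TLFragmenter
// attachment idiom are assumptions for the example; each tile then gets one 32-bit word at
// `address + 4*i`, with bit 0 holding the tile in reset while it is set.
//
//   val tileResetSetter = LazyModule(
//     new TileResetSetter(0x110000, cbus.beatBytes, tileNames, initResetHarts = Nil))
//   cbus.coupleTo("tile-reset-setter") {
//     tileResetSetter.tlNode := TLFragmenter(cbus.beatBytes, cbus.blockBytes) := _
//   }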
File MuxLiteral.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.log2Ceil
import scala.reflect.ClassTag
/* MuxLiteral creates a lookup table from a key to a list of values.
* Unlike MuxLookup, the table keys must be exclusive literals.
*/
object MuxLiteral
{
def apply[T <: Data:ClassTag](index: UInt, default: T, first: (UInt, T), rest: (UInt, T)*): T =
apply(index, default, first :: rest.toList)
def apply[T <: Data:ClassTag](index: UInt, default: T, cases: Seq[(UInt, T)]): T =
MuxTable(index, default, cases.map { case (k, v) => (k.litValue, v) })
}
object MuxSeq
{
def apply[T <: Data:ClassTag](index: UInt, default: T, first: T, rest: T*): T =
apply(index, default, first :: rest.toList)
def apply[T <: Data:ClassTag](index: UInt, default: T, cases: Seq[T]): T =
MuxTable(index, default, cases.zipWithIndex.map { case (v, i) => (BigInt(i), v) })
}
object MuxTable
{
def apply[T <: Data:ClassTag](index: UInt, default: T, first: (BigInt, T), rest: (BigInt, T)*): T =
apply(index, default, first :: rest.toList)
def apply[T <: Data:ClassTag](index: UInt, default: T, cases: Seq[(BigInt, T)]): T = {
/* All keys must be >= 0 and distinct */
cases.foreach { case (k, _) => require (k >= 0) }
require (cases.map(_._1).distinct.size == cases.size)
/* Filter out any cases identical to the default */
val simple = cases.filter { case (k, v) => !default.isLit || !v.isLit || v.litValue != default.litValue }
val maxKey = (BigInt(0) +: simple.map(_._1)).max
val endIndex = BigInt(1) << log2Ceil(maxKey+1)
if (simple.isEmpty) {
default
} else if (endIndex <= 2*simple.size) {
/* The dense encoding case uses a Vec */
val table = Array.fill(endIndex.toInt) { default }
simple.foreach { case (k, v) => table(k.toInt) = v }
Mux(index >= endIndex.U, default, VecInit(table)(index))
} else {
/* The sparse encoding case uses switch */
val out = WireDefault(default)
simple.foldLeft(new chisel3.util.SwitchContext(index, None, Set.empty)) { case (acc, (k, v)) =>
acc.is (k.U) { out := v }
}
out
}
}
}
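// Illustrative sketch (not part of the original file): the three helpers differ only in how
// the table keys are supplied. The widths and key values below are arbitrary examples.
//
//   val sel = Wire(UInt(3.W))
//   val a = MuxLiteral(sel, 0.U(8.W), 1.U -> 10.U(8.W), 4.U -> 20.U(8.W))                    // exclusive literal keys
//   val b = MuxSeq(sel, 0.U(8.W), Seq(11.U(8.W), 12.U(8.W), 13.U(8.W)))                      // implicit keys 0, 1, 2
//   val c = MuxTable(sel, 0.U(8.W), Seq(BigInt(1) -> 10.U(8.W), BigInt(6) -> 20.U(8.W)))     // sparse keys fall back to a switch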
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** instantiate this [[LazyModule]], return the [[AutoBundle]] and any unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
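// Illustrative sketch (not part of the original file): a LazyModule whose implementation is a
// LazyRawModuleImp and forwards an externally supplied clock/reset to its lazy children. The
// io_clock/io_reset ports are assumptions for the example.
//
//   class ExampleIsland(implicit p: Parameters) extends LazyModule {
//     lazy val module = new LazyRawModuleImp(this) {
//       override def provideImplicitClockToLazyChildren = true
//       val io_clock = IO(Input(Clock()))
//       val io_reset = IO(Input(AsyncReset()))
//       childClock := io_clock
//       childReset := io_reset
//     }
//   }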
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
  * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]];
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
  *   whether to flip the direction in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`; if false it
  *   corresponds to `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
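// Illustrative sketch (not part of the original file): the flag can be cleared through a CDE
// Config fragment so that InwardNodeImp.monitor is never invoked during elaboration
// (Config comes from org.chipsalliance.cde.config; the fragment name is an assumption).
//
//   class WithNoMonitors extends Config((site, here, up) => {
//     case MonitorsEnabled => false
//   })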
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
  * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all node are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
  *   Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
  *   Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
  *   the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
* Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
* It should extends from [[chisel3.Data]], which represents the real hardware.
* @tparam DO
* Downward-flowing parameters generated on the outer side of the node. It is usually a brunch of parameters
* describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself.
* @tparam UO
* Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
* Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
* interface. It should extends from [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
  *   - line `─`: the source is processed by a function and the result is passed on to others
  *   - arrow `→`: the target of the arrow is generated from the source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
  // This node acts as a [[NodeHandle]] whose inward and outward nodes are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
    * Given counts of known inward and outward bindings and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
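  // A minimal sketch (assumed, not taken from this file) of how a 1:1 adapter-style node might
  // implement resolveStar: at most one side may be starred, and the starred side simply adopts the
  // known cardinality of the opposite side.
  // {{{
  //   override def resolveStar(iKnown: Int, oKnown: Int, iStars: Int, oStars: Int): (Int, Int) = {
  //     require(iStars + oStars <= 1, "an adapter-style node can star at most one side")
  //     if (oStars > 0) (0, iKnown)        // outputs mirror the known inputs
  //     else if (iStars > 0) (oKnown, 0)   // inputs mirror the known outputs
  //     else { require(iKnown == oKnown); (0, 0) }
  //   }
  // }}}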
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
    * An `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
  /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uoParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
    * An `n`-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
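  // Illustrative sketch (assumed): a node whose ports map 1:1 could implement both mapping functions
  // by converting each port's parameters with hypothetical per-port functions dFn: DI => DO and uFn: UO => UI.
  // {{{
  //   protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] = { require(n == p.size); p.map(dFn) }
  //   protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] = { require(n == p.size); p.map(uFn) }
  // }}}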
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
    /** Determine which [[BaseNode]]s are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
      * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)`; `flexSet` for `e` or `f` will be `Set(e, f)`.
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
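  // Worked example (illustrative): if the nodes reachable through `:*=*` together have two `:*=`
  // bindings (allSink = 2) and no `:=*` bindings (allSource = 0), then flexOffset = 2 > 0, so the
  // whole flex group resolves its cardinality in the sink direction.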
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
  /** Ensure that the same node is not visited twice while resolving `:*=` and related operators. */
private var starCycleGuard = false
  /** Resolve all the star operators into concrete indices. As connections are made, some may be "star"
    * connections whose actual number of edges still needs to be determined. We also need to build up the ranges of
    * edges which correspond to each binding operator, so that we can apply the correct edge parameters and later
    * build up the correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
      // Resolving the stars is delegated to the node subclass, which implements the algorithm in resolveStar.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
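  // Worked example (illustrative): a node with three outward bindings [BIND_ONCE, BIND_STAR, BIND_ONCE]
  // whose resolveStar chose oStar = 2 gets per-binding edge counts [1, 2, 1]. Then
  // oSum = [0, 1, 3, 4] and oPortMapping = [(0,1), (1,3), (3,4)], i.e. the star binding owns
  // edge indices 1 and 2.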
  /** Sequence of ports on the outward side of this node.
    *
    * This should be called after all star bindings are resolved.
    *
    * Each element is: `j` the index of this connection within the [[iPortMapping]] range of the [[InwardNode]] on the
    * other side of the binding, `n` the instance of that inward node, `p` the view of [[Parameters]] where this
    * connection was made, and `s` the source info where this connection was made in the source code.
    */
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  /** Sequence of ports on the inward side of this node.
    *
    * This should be called after all star bindings are resolved.
    *
    * Each element is: `j` the index of this connection within the [[oPortMapping]] range of the [[OutwardNode]] on the
    * other side of the binding, `n` the instance of that outward node, `p` the view of [[Parameters]] where this
    * connection was made, and `s` the [[SourceInfo]] where this connection was made in the source code.
    */
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
      // query the range of port indices on the other node that corresponds to this binding
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
    * If you need access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
    // TODO: DontCare-ing unconnected forwarded diplomatic signals is a compatibility workaround.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
    // TODO: DontCare-ing unconnected forwarded diplomatic signals is a compatibility workaround.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
  /** Create the [[Dangle]]s which describe the connections from this node's outputs to other nodes' inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
  /** Create the [[Dangle]]s which describe the connections to this node's inputs from other nodes' outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
    * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
    * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
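  // Typical usage sketch (names such as MyLazyModule, MyImp, and node are illustrative): inside a
  // LazyModuleImp, the negotiated bundle/edge pairs are consumed after instantiation has begun.
  // {{{
  //   class MyImp(outer: MyLazyModule) extends LazyModuleImp(outer) {
  //     val (bIn, eIn)   = outer.node.in.head    // hardware bundle + inward edge parameters
  //     val (bOut, eOut) = outer.node.out.head   // hardware bundle + outward edge parameters
  //     bOut <> bIn                              // e.g. a simple pass-through connection
  //   }
  // }}}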
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
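  // Note that the binding flavor is inverted when recorded on the far side: a BIND_STAR pushed from
  // this (sink) side is recorded as BIND_QUERY on the source side and vice versa, so each side knows
  // whether it must answer a cardinality query or ask one.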
  /** Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
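  // Worked example (illustrative, assuming manager.beatBytes = 8): a 4-byte access (lgSize = 2) at a
  // beat offset of 0x4 is aligned (0x4 & 0x3 == 0) and selects the upper four byte lanes, so
  // mask(address, lgSize) would be 8'b1111_0000.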
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
        // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
        // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
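  // Worked example (illustrative, assuming manager.beatBytes = 8): a PutFullData of size = 5
  // (2^5 = 32 bytes) takes numBeats = 4 beats (numBeats1 = 3), while a Get of the same size carries
  // no data on this channel, so numBeats = 1 and numBeats1 = 0.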
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
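  // Usage sketch (illustrative; `edge` and `tl` are assumed names): tracking multi-beat bursts on a
  // decoupled TileLink channel.
  // {{{
  //   val (a_first, a_last, a_done) = edge.firstlast(tl.a)
  //   when (tl.a.fire && a_first) { /* latch request metadata at the start of a burst */ }
  //   when (a_done) { /* the last beat of the burst was accepted this cycle */ }
  // }}}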
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
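  // Usage sketch (illustrative; `edge`, `io.mem`, `addr`, and `wantRead` are assumed names): a client
  // driving a read request through this edge.
  // {{{
  //   val (legal, getBits) = edge.Get(fromSource = 0.U, toAddress = addr, lgSize = 3.U)
  //   io.mem.a.valid := wantRead && legal
  //   io.mem.a.bits  := getBits
  // }}}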
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TileResetSetter( // @[TileResetSetter.scala:26:25]
input clock, // @[TileResetSetter.scala:26:25]
input reset, // @[TileResetSetter.scala:26:25]
input auto_clock_in_member_allClocks_uncore_clock, // @[LazyModuleImp.scala:107:25]
input auto_clock_in_member_allClocks_uncore_reset, // @[LazyModuleImp.scala:107:25]
output auto_clock_out_member_allClocks_uncore_clock, // @[LazyModuleImp.scala:107:25]
output auto_clock_out_member_allClocks_uncore_reset, // @[LazyModuleImp.scala:107:25]
output auto_tl_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_tl_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_tl_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_tl_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_tl_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [10:0] auto_tl_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [20:0] auto_tl_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_tl_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_tl_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_tl_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_tl_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_tl_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_tl_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_tl_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [10:0] auto_tl_in_d_bits_source // @[LazyModuleImp.scala:107:25]
);
wire out_front_valid; // @[RegisterRouter.scala:87:24]
wire out_front_ready; // @[RegisterRouter.scala:87:24]
wire out_bits_read; // @[RegisterRouter.scala:87:24]
wire [10:0] out_bits_extra_tlrr_extra_source; // @[RegisterRouter.scala:87:24]
wire [8:0] in_bits_index; // @[RegisterRouter.scala:73:18]
wire in_bits_read; // @[RegisterRouter.scala:73:18]
wire auto_clock_in_member_allClocks_uncore_clock_0 = auto_clock_in_member_allClocks_uncore_clock; // @[TileResetSetter.scala:26:25]
wire auto_clock_in_member_allClocks_uncore_reset_0 = auto_clock_in_member_allClocks_uncore_reset; // @[TileResetSetter.scala:26:25]
wire auto_tl_in_a_valid_0 = auto_tl_in_a_valid; // @[TileResetSetter.scala:26:25]
wire [2:0] auto_tl_in_a_bits_opcode_0 = auto_tl_in_a_bits_opcode; // @[TileResetSetter.scala:26:25]
wire [2:0] auto_tl_in_a_bits_param_0 = auto_tl_in_a_bits_param; // @[TileResetSetter.scala:26:25]
wire [1:0] auto_tl_in_a_bits_size_0 = auto_tl_in_a_bits_size; // @[TileResetSetter.scala:26:25]
wire [10:0] auto_tl_in_a_bits_source_0 = auto_tl_in_a_bits_source; // @[TileResetSetter.scala:26:25]
wire [20:0] auto_tl_in_a_bits_address_0 = auto_tl_in_a_bits_address; // @[TileResetSetter.scala:26:25]
wire [7:0] auto_tl_in_a_bits_mask_0 = auto_tl_in_a_bits_mask; // @[TileResetSetter.scala:26:25]
wire [63:0] auto_tl_in_a_bits_data_0 = auto_tl_in_a_bits_data; // @[TileResetSetter.scala:26:25]
wire auto_tl_in_a_bits_corrupt_0 = auto_tl_in_a_bits_corrupt; // @[TileResetSetter.scala:26:25]
wire auto_tl_in_d_ready_0 = auto_tl_in_d_ready; // @[TileResetSetter.scala:26:25]
wire [1:0] _out_frontSel_T = 2'h1; // @[OneHot.scala:58:35]
wire [1:0] _out_backSel_T = 2'h1; // @[OneHot.scala:58:35]
wire [8:0] out_maskMatch = 9'h1FF; // @[RegisterRouter.scala:87:24]
wire tile_async_resets_0 = 1'h1; // @[TileResetSetter.scala:29:33]
wire _tile_async_resets_0_T = 1'h1; // @[TileResetSetter.scala:31:38]
wire out_frontSel_0 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_backSel_0 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_rifireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_5 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire out_rifireMux = 1'h1; // @[MuxLiteral.scala:49:10]
wire out_wifireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_6 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire out_wifireMux = 1'h1; // @[MuxLiteral.scala:49:10]
wire out_rofireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_5 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire out_rofireMux = 1'h1; // @[MuxLiteral.scala:49:10]
wire out_wofireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_6 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire out_wofireMux = 1'h1; // @[MuxLiteral.scala:49:10]
wire out_iready = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_oready = 1'h1; // @[RegisterRouter.scala:87:24]
wire [2:0] tlNodeIn_d_bits_d_opcode = 3'h0; // @[Edges.scala:792:17]
wire [1:0] auto_tl_in_d_bits_param = 2'h0; // @[TileResetSetter.scala:26:25]
wire [1:0] tlNodeIn_d_bits_param = 2'h0; // @[MixedNode.scala:551:17]
wire [1:0] tlNodeIn_d_bits_d_param = 2'h0; // @[Edges.scala:792:17]
wire auto_tl_in_d_bits_sink = 1'h0; // @[TileResetSetter.scala:26:25]
wire auto_tl_in_d_bits_denied = 1'h0; // @[TileResetSetter.scala:26:25]
wire auto_tl_in_d_bits_corrupt = 1'h0; // @[TileResetSetter.scala:26:25]
wire tlNodeIn_d_bits_sink = 1'h0; // @[MixedNode.scala:551:17]
wire tlNodeIn_d_bits_denied = 1'h0; // @[MixedNode.scala:551:17]
wire tlNodeIn_d_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17]
wire _out_T_7 = 1'h0; // @[RegisterRouter.scala:87:24]
wire _out_T_8 = 1'h0; // @[RegisterRouter.scala:87:24]
wire out_frontSel_1 = 1'h0; // @[RegisterRouter.scala:87:24]
wire out_backSel_1 = 1'h0; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_6 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_wifireMux_T_7 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_rofireMux_T_6 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_wofireMux_T_7 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_out_bits_data_T = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_out_bits_data_T_2 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_out_bits_data_WIRE_1_0 = 1'h0; // @[MuxLiteral.scala:49:48]
wire _out_out_bits_data_T_3 = 1'h0; // @[MuxLiteral.scala:49:10]
wire _out_out_bits_data_T_4 = 1'h0; // @[RegisterRouter.scala:87:24]
wire tlNodeIn_d_bits_d_sink = 1'h0; // @[Edges.scala:792:17]
wire tlNodeIn_d_bits_d_denied = 1'h0; // @[Edges.scala:792:17]
wire tlNodeIn_d_bits_d_corrupt = 1'h0; // @[Edges.scala:792:17]
wire [63:0] auto_tl_in_d_bits_data = 64'h0; // @[TileResetSetter.scala:26:25]
wire [63:0] tlNodeIn_d_bits_data = 64'h0; // @[MixedNode.scala:551:17]
wire [63:0] out_bits_data = 64'h0; // @[RegisterRouter.scala:87:24]
wire [63:0] tlNodeIn_d_bits_d_data = 64'h0; // @[Edges.scala:792:17]
wire clockNodeIn_member_allClocks_uncore_clock = auto_clock_in_member_allClocks_uncore_clock_0; // @[MixedNode.scala:551:17]
wire clockNodeOut_member_allClocks_uncore_clock; // @[MixedNode.scala:542:17]
wire clockNodeIn_member_allClocks_uncore_reset = auto_clock_in_member_allClocks_uncore_reset_0; // @[MixedNode.scala:551:17]
wire clockNodeOut_member_allClocks_uncore_reset; // @[MixedNode.scala:542:17]
wire tlNodeIn_a_ready; // @[MixedNode.scala:551:17]
wire tlNodeIn_a_valid = auto_tl_in_a_valid_0; // @[MixedNode.scala:551:17]
wire [2:0] tlNodeIn_a_bits_opcode = auto_tl_in_a_bits_opcode_0; // @[MixedNode.scala:551:17]
wire [2:0] tlNodeIn_a_bits_param = auto_tl_in_a_bits_param_0; // @[MixedNode.scala:551:17]
wire [1:0] tlNodeIn_a_bits_size = auto_tl_in_a_bits_size_0; // @[MixedNode.scala:551:17]
wire [10:0] tlNodeIn_a_bits_source = auto_tl_in_a_bits_source_0; // @[MixedNode.scala:551:17]
wire [20:0] tlNodeIn_a_bits_address = auto_tl_in_a_bits_address_0; // @[MixedNode.scala:551:17]
wire [7:0] tlNodeIn_a_bits_mask = auto_tl_in_a_bits_mask_0; // @[MixedNode.scala:551:17]
wire [63:0] tlNodeIn_a_bits_data = auto_tl_in_a_bits_data_0; // @[MixedNode.scala:551:17]
wire tlNodeIn_a_bits_corrupt = auto_tl_in_a_bits_corrupt_0; // @[MixedNode.scala:551:17]
wire tlNodeIn_d_ready = auto_tl_in_d_ready_0; // @[MixedNode.scala:551:17]
wire tlNodeIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] tlNodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] tlNodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [10:0] tlNodeIn_d_bits_source; // @[MixedNode.scala:551:17]
wire auto_clock_out_member_allClocks_uncore_clock_0; // @[TileResetSetter.scala:26:25]
wire auto_clock_out_member_allClocks_uncore_reset_0; // @[TileResetSetter.scala:26:25]
wire auto_tl_in_a_ready_0; // @[TileResetSetter.scala:26:25]
wire [2:0] auto_tl_in_d_bits_opcode_0; // @[TileResetSetter.scala:26:25]
wire [1:0] auto_tl_in_d_bits_size_0; // @[TileResetSetter.scala:26:25]
wire [10:0] auto_tl_in_d_bits_source_0; // @[TileResetSetter.scala:26:25]
wire auto_tl_in_d_valid_0; // @[TileResetSetter.scala:26:25]
wire in_ready; // @[RegisterRouter.scala:73:18]
assign auto_tl_in_a_ready_0 = tlNodeIn_a_ready; // @[MixedNode.scala:551:17]
wire in_valid = tlNodeIn_a_valid; // @[RegisterRouter.scala:73:18]
wire [1:0] in_bits_extra_tlrr_extra_size = tlNodeIn_a_bits_size; // @[RegisterRouter.scala:73:18]
wire [10:0] in_bits_extra_tlrr_extra_source = tlNodeIn_a_bits_source; // @[RegisterRouter.scala:73:18]
wire [7:0] in_bits_mask = tlNodeIn_a_bits_mask; // @[RegisterRouter.scala:73:18]
wire [63:0] in_bits_data = tlNodeIn_a_bits_data; // @[RegisterRouter.scala:73:18]
wire out_ready = tlNodeIn_d_ready; // @[RegisterRouter.scala:87:24]
wire out_valid; // @[RegisterRouter.scala:87:24]
assign auto_tl_in_d_valid_0 = tlNodeIn_d_valid; // @[MixedNode.scala:551:17]
assign auto_tl_in_d_bits_opcode_0 = tlNodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] tlNodeIn_d_bits_d_size; // @[Edges.scala:792:17]
assign auto_tl_in_d_bits_size_0 = tlNodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [10:0] tlNodeIn_d_bits_d_source; // @[Edges.scala:792:17]
assign auto_tl_in_d_bits_source_0 = tlNodeIn_d_bits_source; // @[MixedNode.scala:551:17]
assign auto_clock_out_member_allClocks_uncore_clock_0 = clockNodeOut_member_allClocks_uncore_clock; // @[MixedNode.scala:542:17]
assign auto_clock_out_member_allClocks_uncore_reset_0 = clockNodeOut_member_allClocks_uncore_reset; // @[MixedNode.scala:542:17]
assign clockNodeOut_member_allClocks_uncore_clock = clockNodeIn_member_allClocks_uncore_clock; // @[MixedNode.scala:542:17, :551:17]
assign clockNodeOut_member_allClocks_uncore_reset = clockNodeIn_member_allClocks_uncore_reset; // @[MixedNode.scala:542:17, :551:17]
wire _out_in_ready_T; // @[RegisterRouter.scala:87:24]
assign tlNodeIn_a_ready = in_ready; // @[RegisterRouter.scala:73:18]
wire _in_bits_read_T; // @[RegisterRouter.scala:74:36]
wire _out_front_valid_T = in_valid; // @[RegisterRouter.scala:73:18, :87:24]
wire out_front_bits_read = in_bits_read; // @[RegisterRouter.scala:73:18, :87:24]
wire [8:0] out_front_bits_index = in_bits_index; // @[RegisterRouter.scala:73:18, :87:24]
wire [63:0] out_front_bits_data = in_bits_data; // @[RegisterRouter.scala:73:18, :87:24]
wire [7:0] out_front_bits_mask = in_bits_mask; // @[RegisterRouter.scala:73:18, :87:24]
wire [10:0] out_front_bits_extra_tlrr_extra_source = in_bits_extra_tlrr_extra_source; // @[RegisterRouter.scala:73:18, :87:24]
wire [1:0] out_front_bits_extra_tlrr_extra_size = in_bits_extra_tlrr_extra_size; // @[RegisterRouter.scala:73:18, :87:24]
assign _in_bits_read_T = tlNodeIn_a_bits_opcode == 3'h4; // @[RegisterRouter.scala:74:36]
assign in_bits_read = _in_bits_read_T; // @[RegisterRouter.scala:73:18, :74:36]
wire [17:0] _in_bits_index_T = tlNodeIn_a_bits_address[20:3]; // @[Edges.scala:192:34]
assign in_bits_index = _in_bits_index_T[8:0]; // @[RegisterRouter.scala:73:18, :75:19]
wire _out_front_ready_T = out_ready; // @[RegisterRouter.scala:87:24]
wire _out_out_valid_T; // @[RegisterRouter.scala:87:24]
assign tlNodeIn_d_valid = out_valid; // @[RegisterRouter.scala:87:24]
wire _tlNodeIn_d_bits_opcode_T = out_bits_read; // @[RegisterRouter.scala:87:24, :105:25]
assign tlNodeIn_d_bits_d_source = out_bits_extra_tlrr_extra_source; // @[RegisterRouter.scala:87:24]
wire [1:0] out_bits_extra_tlrr_extra_size; // @[RegisterRouter.scala:87:24]
assign tlNodeIn_d_bits_d_size = out_bits_extra_tlrr_extra_size; // @[RegisterRouter.scala:87:24]
assign _out_in_ready_T = out_front_ready; // @[RegisterRouter.scala:87:24]
assign _out_out_valid_T = out_front_valid; // @[RegisterRouter.scala:87:24]
assign out_bits_read = out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire [8:0] out_findex = out_front_bits_index; // @[RegisterRouter.scala:87:24]
wire [8:0] out_bindex = out_front_bits_index; // @[RegisterRouter.scala:87:24]
assign out_bits_extra_tlrr_extra_source = out_front_bits_extra_tlrr_extra_source; // @[RegisterRouter.scala:87:24]
assign out_bits_extra_tlrr_extra_size = out_front_bits_extra_tlrr_extra_size; // @[RegisterRouter.scala:87:24]
wire _out_T = out_findex == 9'h0; // @[RegisterRouter.scala:87:24]
wire _out_T_1 = out_bindex == 9'h0; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_3; // @[RegisterRouter.scala:87:24]
wire _out_out_bits_data_WIRE_0 = _out_T_1; // @[MuxLiteral.scala:49:48]
wire out_rivalid_0; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_4; // @[RegisterRouter.scala:87:24]
wire out_wivalid_0; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_3; // @[RegisterRouter.scala:87:24]
wire out_roready_0; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_4; // @[RegisterRouter.scala:87:24]
wire out_woready_0; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T = out_front_bits_mask[0]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T = out_front_bits_mask[0]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_1 = out_front_bits_mask[1]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_1 = out_front_bits_mask[1]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_2 = out_front_bits_mask[2]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_2 = out_front_bits_mask[2]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_3 = out_front_bits_mask[3]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_3 = out_front_bits_mask[3]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_4 = out_front_bits_mask[4]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_4 = out_front_bits_mask[4]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_5 = out_front_bits_mask[5]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_5 = out_front_bits_mask[5]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_6 = out_front_bits_mask[6]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_6 = out_front_bits_mask[6]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_7 = out_front_bits_mask[7]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_7 = out_front_bits_mask[7]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_8 = {8{_out_frontMask_T}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_9 = {8{_out_frontMask_T_1}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_10 = {8{_out_frontMask_T_2}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_11 = {8{_out_frontMask_T_3}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_12 = {8{_out_frontMask_T_4}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_13 = {8{_out_frontMask_T_5}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_14 = {8{_out_frontMask_T_6}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_15 = {8{_out_frontMask_T_7}}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_lo_lo = {_out_frontMask_T_9, _out_frontMask_T_8}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_lo_hi = {_out_frontMask_T_11, _out_frontMask_T_10}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_frontMask_lo = {out_frontMask_lo_hi, out_frontMask_lo_lo}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_hi_lo = {_out_frontMask_T_13, _out_frontMask_T_12}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_hi_hi = {_out_frontMask_T_15, _out_frontMask_T_14}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_frontMask_hi = {out_frontMask_hi_hi, out_frontMask_hi_lo}; // @[RegisterRouter.scala:87:24]
wire [63:0] out_frontMask = {out_frontMask_hi, out_frontMask_lo}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_8 = {8{_out_backMask_T}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_9 = {8{_out_backMask_T_1}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_10 = {8{_out_backMask_T_2}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_11 = {8{_out_backMask_T_3}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_12 = {8{_out_backMask_T_4}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_13 = {8{_out_backMask_T_5}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_14 = {8{_out_backMask_T_6}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_15 = {8{_out_backMask_T_7}}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_lo_lo = {_out_backMask_T_9, _out_backMask_T_8}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_lo_hi = {_out_backMask_T_11, _out_backMask_T_10}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_backMask_lo = {out_backMask_lo_hi, out_backMask_lo_lo}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_hi_lo = {_out_backMask_T_13, _out_backMask_T_12}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_hi_hi = {_out_backMask_T_15, _out_backMask_T_14}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_backMask_hi = {out_backMask_hi_hi, out_backMask_hi_lo}; // @[RegisterRouter.scala:87:24]
wire [63:0] out_backMask = {out_backMask_hi, out_backMask_lo}; // @[RegisterRouter.scala:87:24]
wire _out_rimask_T = out_frontMask[0]; // @[RegisterRouter.scala:87:24]
wire _out_wimask_T = out_frontMask[0]; // @[RegisterRouter.scala:87:24]
wire out_rimask = _out_rimask_T; // @[RegisterRouter.scala:87:24]
wire out_wimask = _out_wimask_T; // @[RegisterRouter.scala:87:24]
wire _out_romask_T = out_backMask[0]; // @[RegisterRouter.scala:87:24]
wire _out_womask_T = out_backMask[0]; // @[RegisterRouter.scala:87:24]
wire out_romask = _out_romask_T; // @[RegisterRouter.scala:87:24]
wire out_womask = _out_womask_T; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid = out_rivalid_0 & out_rimask; // @[RegisterRouter.scala:87:24]
wire out_f_roready = out_roready_0 & out_romask; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid = out_wivalid_0 & out_wimask; // @[RegisterRouter.scala:87:24]
wire out_f_woready = out_woready_0 & out_womask; // @[RegisterRouter.scala:87:24]
wire _out_T_2 = out_front_bits_data[0]; // @[RegisterRouter.scala:87:24]
wire _out_T_3 = ~out_rimask; // @[RegisterRouter.scala:87:24]
wire _out_T_4 = ~out_wimask; // @[RegisterRouter.scala:87:24]
wire _out_T_5 = ~out_romask; // @[RegisterRouter.scala:87:24]
wire _out_T_6 = ~out_womask; // @[RegisterRouter.scala:87:24]
wire _GEN = in_valid & out_front_ready; // @[RegisterRouter.scala:73:18, :87:24]
wire _out_rifireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_rifireMux_T = _GEN; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_wifireMux_T = _GEN; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_1 = _out_rifireMux_T & out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_2 = _out_rifireMux_T_1; // @[RegisterRouter.scala:87:24]
assign _out_rifireMux_T_3 = _out_rifireMux_T_2 & _out_T; // @[RegisterRouter.scala:87:24]
assign out_rivalid_0 = _out_rifireMux_T_3; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_4 = ~_out_T; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_1 = ~out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_2 = _out_wifireMux_T & _out_wifireMux_T_1; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_3 = _out_wifireMux_T_2; // @[RegisterRouter.scala:87:24]
assign _out_wifireMux_T_4 = _out_wifireMux_T_3 & _out_T; // @[RegisterRouter.scala:87:24]
assign out_wivalid_0 = _out_wifireMux_T_4; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_5 = ~_out_T; // @[RegisterRouter.scala:87:24]
wire _GEN_0 = out_front_valid & out_ready; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_rofireMux_T = _GEN_0; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_wofireMux_T = _GEN_0; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_1 = _out_rofireMux_T & out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_2 = _out_rofireMux_T_1; // @[RegisterRouter.scala:87:24]
assign _out_rofireMux_T_3 = _out_rofireMux_T_2 & _out_T_1; // @[RegisterRouter.scala:87:24]
assign out_roready_0 = _out_rofireMux_T_3; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_4 = ~_out_T_1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_1 = ~out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_2 = _out_wofireMux_T & _out_wofireMux_T_1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_3 = _out_wofireMux_T_2; // @[RegisterRouter.scala:87:24]
assign _out_wofireMux_T_4 = _out_wofireMux_T_3 & _out_T_1; // @[RegisterRouter.scala:87:24]
assign out_woready_0 = _out_wofireMux_T_4; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_5 = ~_out_T_1; // @[RegisterRouter.scala:87:24]
assign in_ready = _out_in_ready_T; // @[RegisterRouter.scala:73:18, :87:24]
assign out_front_valid = _out_front_valid_T; // @[RegisterRouter.scala:87:24]
assign out_front_ready = _out_front_ready_T; // @[RegisterRouter.scala:87:24]
assign out_valid = _out_out_valid_T; // @[RegisterRouter.scala:87:24]
wire _out_out_bits_data_T_1 = _out_out_bits_data_WIRE_0; // @[MuxLiteral.scala:49:{10,48}]
assign tlNodeIn_d_bits_size = tlNodeIn_d_bits_d_size; // @[Edges.scala:792:17]
assign tlNodeIn_d_bits_source = tlNodeIn_d_bits_d_source; // @[Edges.scala:792:17]
assign tlNodeIn_d_bits_opcode = {2'h0, _tlNodeIn_d_bits_opcode_T}; // @[RegisterRouter.scala:105:{19,25}]
TLMonitor_63 monitor ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (tlNodeIn_a_ready), // @[MixedNode.scala:551:17]
.io_in_a_valid (tlNodeIn_a_valid), // @[MixedNode.scala:551:17]
.io_in_a_bits_opcode (tlNodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_a_bits_param (tlNodeIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_in_a_bits_size (tlNodeIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_in_a_bits_source (tlNodeIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_in_a_bits_address (tlNodeIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_in_a_bits_mask (tlNodeIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_in_a_bits_data (tlNodeIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_in_a_bits_corrupt (tlNodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_in_d_ready (tlNodeIn_d_ready), // @[MixedNode.scala:551:17]
.io_in_d_valid (tlNodeIn_d_valid), // @[MixedNode.scala:551:17]
.io_in_d_bits_opcode (tlNodeIn_d_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_d_bits_size (tlNodeIn_d_bits_size), // @[MixedNode.scala:551:17]
.io_in_d_bits_source (tlNodeIn_d_bits_source) // @[MixedNode.scala:551:17]
); // @[Nodes.scala:27:25]
AsyncResetRegVec_w1_i0_6 r_tile_resets_0 ( // @[TileResetSetter.scala:33:15]
.clock (clock),
.io_d (_out_T_2), // @[RegisterRouter.scala:87:24]
.io_en (out_f_woready) // @[RegisterRouter.scala:87:24]
); // @[TileResetSetter.scala:33:15]
assign auto_clock_out_member_allClocks_uncore_clock = auto_clock_out_member_allClocks_uncore_clock_0; // @[TileResetSetter.scala:26:25]
assign auto_clock_out_member_allClocks_uncore_reset = auto_clock_out_member_allClocks_uncore_reset_0; // @[TileResetSetter.scala:26:25]
assign auto_tl_in_a_ready = auto_tl_in_a_ready_0; // @[TileResetSetter.scala:26:25]
assign auto_tl_in_d_valid = auto_tl_in_d_valid_0; // @[TileResetSetter.scala:26:25]
assign auto_tl_in_d_bits_opcode = auto_tl_in_d_bits_opcode_0; // @[TileResetSetter.scala:26:25]
assign auto_tl_in_d_bits_size = auto_tl_in_d_bits_size_0; // @[TileResetSetter.scala:26:25]
assign auto_tl_in_d_bits_source = auto_tl_in_d_bits_source_0; // @[TileResetSetter.scala:26:25]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File RegMapper.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.regmapper
import chisel3._
import chisel3.experimental.SourceInfo
import chisel3.util._
import freechips.rocketchip.diplomacy.AddressDecoder
import freechips.rocketchip.util.{BundleFieldBase, BundleMap, MuxSeq, ReduceOthers, property}
// A bus agnostic register interface to a register-based device
case class RegMapperParams(indexBits: Int, maskBits: Int, extraFields: Seq[BundleFieldBase] = Nil)
class RegMapperInput(val params: RegMapperParams) extends Bundle
{
val read = Bool()
val index = UInt((params.indexBits).W)
val data = UInt((params.maskBits*8).W)
val mask = UInt((params.maskBits).W)
val extra = BundleMap(params.extraFields)
}
class RegMapperOutput(val params: RegMapperParams) extends Bundle
{
val read = Bool()
val data = UInt((params.maskBits*8).W)
val extra = BundleMap(params.extraFields)
}
object RegMapper
{
// Create a generic register-based device
def apply(bytes: Int, concurrency: Int, undefZero: Boolean, in: DecoupledIO[RegMapperInput], mapping: RegField.Map*)(implicit sourceInfo: SourceInfo) = {
// Filter out zero-width fields
val bytemap = mapping.toList.map { case (offset, fields) => (offset, fields.filter(_.width != 0)) }
// Negative addresses are bad
bytemap.foreach { byte => require (byte._1 >= 0) }
// Transform all fields into bit offsets Seq[(bit, field)]
val bitmap = bytemap.map { case (byte, fields) =>
val bits = fields.scanLeft(byte * 8)(_ + _.width).init
bits zip fields
}.flatten.sortBy(_._1)
// Detect overlaps
(bitmap.init zip bitmap.tail) foreach { case ((lbit, lfield), (rbit, rfield)) =>
require (lbit + lfield.width <= rbit, s"Register map overlaps at bit ${rbit}.")
}
// Group those fields into bus words Map[word, List[(bit, field)]]
val wordmap = bitmap.groupBy(_._1 / (8*bytes))
// Make sure registers fit
val inParams = in.bits.params
val inBits = inParams.indexBits
assert (wordmap.keySet.max < (1 << inBits), "Register map does not fit in device")
val out = Wire(Decoupled(new RegMapperOutput(inParams)))
val front = Wire(Decoupled(new RegMapperInput(inParams)))
front.bits := in.bits
// Must this device pipeline the control channel?
val pipelined = wordmap.values.map(_.map(_._2.pipelined)).flatten.reduce(_ || _)
val depth = concurrency
require (depth >= 0)
require (!pipelined || depth > 0, "Register-based device with request/response handshaking needs concurrency > 0")
val back = if (depth > 0) {
val front_q = Module(new Queue(new RegMapperInput(inParams), depth) {
override def desiredName = s"Queue${depth}_${front.bits.typeName}_i${inParams.indexBits}_m${inParams.maskBits}"
})
front_q.io.enq <> front
front_q.io.deq
} else front
// Convert to and from Bits
def toBits(x: Int, tail: List[Boolean] = List.empty): List[Boolean] =
if (x == 0) tail.reverse else toBits(x >> 1, ((x & 1) == 1) :: tail)
def ofBits(bits: List[Boolean]) = bits.foldRight(0){ case (x,y) => (if (x) 1 else 0) | y << 1 }
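// Illustrative note (not part of the original generator): toBits produces an
// LSB-first list of booleans and ofBits folds it back, so the two are inverses.
// For example, toBits(5) == List(true, false, true) and
// ofBits(List(true, false, true)) == 5.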
// Find the minimal mask that can decide the register map
val mask = AddressDecoder(wordmap.keySet.toList)
val maskMatch = ~mask.U(inBits.W)
val maskFilter = toBits(mask)
val maskBits = maskFilter.filter(x => x).size
// Calculate size and indexes into the register map
val regSize = 1 << maskBits
def regIndexI(x: Int) = ofBits((maskFilter zip toBits(x)).filter(_._1).map(_._2))
def regIndexU(x: UInt) = if (maskBits == 0) 0.U else
Cat((maskFilter zip x.asBools).filter(_._1).map(_._2).reverse)
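// Illustrative note (hypothetical values): with mask = 0x5 the decoder only needs
// bits 0 and 2 of the word index, so maskFilter = List(true, false, true),
// maskBits = 2 and regSize = 4; regIndexI(4) == 2 because only the surviving
// bits (word bit 0 = 0, word bit 2 = 1) are packed into the compressed index.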
val findex = front.bits.index & maskMatch
val bindex = back .bits.index & maskMatch
// Protection flag for undefined registers
val iRightReg = Array.fill(regSize) { true.B }
val oRightReg = Array.fill(regSize) { true.B }
// Transform the wordmap into minimal decoded indexes, Seq[(index, bit, field)]
val flat = wordmap.toList.map { case (word, fields) =>
val index = regIndexI(word)
if (undefZero) {
val uint = (word & ~mask).U(inBits.W)
iRightReg(index) = findex === uint
oRightReg(index) = bindex === uint
}
// Confirm that no field spans a word boundary
fields foreach { case (bit, field) =>
val off = bit - 8*bytes*word
// println(s"Reg ${word}: [${off}, ${off+field.width})")
require (off + field.width <= bytes * 8, s"Field at word ${word}*(${bytes}B) has bits [${off}, ${off+field.width}), which exceeds word limit.")
}
// println("mapping 0x%x -> 0x%x for 0x%x/%d".format(word, index, mask, maskBits))
fields.map { case (bit, field) => (index, bit-8*bytes*word, field) }
}.flatten
// Forward declaration of all flow control signals
val rivalid = Wire(Vec(flat.size, Bool()))
val wivalid = Wire(Vec(flat.size, Bool()))
val roready = Wire(Vec(flat.size, Bool()))
val woready = Wire(Vec(flat.size, Bool()))
// Per-register list of all control signals needed for data to flow
val rifire = Array.fill(regSize) { Nil:List[(Bool, Bool)] }
val wifire = Array.fill(regSize) { Nil:List[(Bool, Bool)] }
val rofire = Array.fill(regSize) { Nil:List[(Bool, Bool)] }
val wofire = Array.fill(regSize) { Nil:List[(Bool, Bool)] }
// The output values for each register
val dataOut = Array.fill(regSize) { 0.U }
// Which bits are touched?
val frontMask = FillInterleaved(8, front.bits.mask)
val backMask = FillInterleaved(8, back .bits.mask)
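// Illustrative note: FillInterleaved(8, mask) expands the per-byte bus mask into a
// per-bit mask, e.g. a 2-bit mask of "b01" becomes the 16-bit value 0x00FF, so each
// field's rimask/wimask below reduces to a simple orR/andR over its bit range.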
// Connect the fields
for (i <- 0 until flat.size) {
val (reg, low, field) = flat(i)
val high = low + field.width - 1
// Confirm that no register is too big
require (high < 8*bytes)
val rimask = frontMask(high, low).orR
val wimask = frontMask(high, low).andR
val romask = backMask(high, low).orR
val womask = backMask(high, low).andR
val data = if (field.write.combinational) back.bits.data else front.bits.data
val f_rivalid = rivalid(i) && rimask
val f_roready = roready(i) && romask
val f_wivalid = wivalid(i) && wimask
val f_woready = woready(i) && womask
val (f_riready, f_rovalid, f_data) = field.read.fn(f_rivalid, f_roready)
val (f_wiready, f_wovalid) = field.write.fn(f_wivalid, f_woready, data(high, low))
// cover reads and writes to register
val fname = field.desc.map{_.name}.getOrElse("")
val fdesc = field.desc.map{_.desc + ":"}.getOrElse("")
val facct = field.desc.map{_.access}.getOrElse("")
if((facct == RegFieldAccessType.R) || (facct == RegFieldAccessType.RW)) {
property.cover(f_rivalid && f_riready, fname + "_Reg_read_start", fdesc + " RegField Read Request Initiate")
property.cover(f_rovalid && f_roready, fname + "_Reg_read_out", fdesc + " RegField Read Request Complete")
}
if((facct == RegFieldAccessType.W) || (facct == RegFieldAccessType.RW)) {
property.cover(f_wivalid && f_wiready, fname + "_Reg_write_start", fdesc + " RegField Write Request Initiate")
property.cover(f_wovalid && f_woready, fname + "_Reg_write_out", fdesc + " RegField Write Request Complete")
}
def litOR(x: Bool, y: Bool) = if (x.isLit && x.litValue == 1) true.B else x || y
// Add this field to the ready-valid signals for the register
rifire(reg) = (rivalid(i), litOR(f_riready, !rimask)) +: rifire(reg)
wifire(reg) = (wivalid(i), litOR(f_wiready, !wimask)) +: wifire(reg)
rofire(reg) = (roready(i), litOR(f_rovalid, !romask)) +: rofire(reg)
wofire(reg) = (woready(i), litOR(f_wovalid, !womask)) +: wofire(reg)
// ... this loop iterates from smallest to largest bit offset
val prepend = if (low == 0) { f_data } else { Cat(f_data, dataOut(reg) | 0.U(low.W)) }
dataOut(reg) = (prepend | 0.U((high+1).W))(high, 0)
}
// Which register is touched?
val iindex = regIndexU(front.bits.index)
val oindex = regIndexU(back .bits.index)
val frontSel = UIntToOH(iindex).asBools
val backSel = UIntToOH(oindex).asBools
// Compute: is the selected register ready? ... and cross-connect all ready-valids
def mux(index: UInt, valid: Bool, select: Seq[Bool], guard: Seq[Bool], flow: Seq[Seq[(Bool, Bool)]]): Bool =
MuxSeq(index, true.B, ((select zip guard) zip flow).map { case ((s, g), f) =>
val out = Wire(Bool())
ReduceOthers((out, valid && s && g) +: f)
out || !g
})
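// Informal reading (not from the original source): for the addressed register, each
// per-field (sink, source) pair in `flow` is cross-connected by ReduceOthers so that a
// field only sees valid once the bus handshake and every other field are ready, while
// `out` collects the register's combined readiness; MuxSeq then selects that result by
// index, defaulting to ready for indexes with no mapped register.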
// Include the per-register one-hot selected criteria
val rifireMux = mux(iindex, in.valid && front.ready && front.bits.read, frontSel, iRightReg, rifire)
val wifireMux = mux(iindex, in.valid && front.ready && !front.bits.read, frontSel, iRightReg, wifire)
val rofireMux = mux(oindex, back.valid && out.ready && back .bits.read, backSel, oRightReg, rofire)
val wofireMux = mux(oindex, back.valid && out.ready && !back .bits.read, backSel, oRightReg, wofire)
val iready = Mux(front.bits.read, rifireMux, wifireMux)
val oready = Mux(back .bits.read, rofireMux, wofireMux)
// Connect the pipeline
in.ready := front.ready && iready
front.valid := in.valid && iready
back.ready := out.ready && oready
out.valid := back.valid && oready
out.bits.read := back.bits.read
out.bits.data := Mux(MuxSeq(oindex, true.B, oRightReg),
MuxSeq(oindex, 0.U, dataOut),
0.U)
out.bits.extra := back.bits.extra
out
}
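// Illustrative usage sketch (hypothetical register names; not part of the original
// file): a device with an 8-byte data bus and two registers might instantiate the
// mapper as
//   val out = RegMapper(8, 0, true, in,
//     0x00 -> Seq(RegField(32, ctrlReg)),
//     0x08 -> Seq(RegField.r(32, statusWire)))
// where `in` is a Decoupled[RegMapperInput]; bus front-ends such as the TileLink
// register router build `in` from bus requests and turn `out` back into responses.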
}
| module Queue1_RegMapperInput_i9_m8( // @[RegMapper.scala:71:32]
input clock, // @[RegMapper.scala:71:32]
input reset, // @[RegMapper.scala:71:32]
output io_enq_ready, // @[Decoupled.scala:255:14]
input io_enq_valid, // @[Decoupled.scala:255:14]
input io_enq_bits_read, // @[Decoupled.scala:255:14]
input [8:0] io_enq_bits_index, // @[Decoupled.scala:255:14]
input [63:0] io_enq_bits_data, // @[Decoupled.scala:255:14]
input [7:0] io_enq_bits_mask, // @[Decoupled.scala:255:14]
input [10:0] io_enq_bits_extra_tlrr_extra_source, // @[Decoupled.scala:255:14]
input [1:0] io_enq_bits_extra_tlrr_extra_size, // @[Decoupled.scala:255:14]
input io_deq_ready, // @[Decoupled.scala:255:14]
output io_deq_valid, // @[Decoupled.scala:255:14]
output io_deq_bits_read, // @[Decoupled.scala:255:14]
output [8:0] io_deq_bits_index, // @[Decoupled.scala:255:14]
output [7:0] io_deq_bits_mask, // @[Decoupled.scala:255:14]
output [10:0] io_deq_bits_extra_tlrr_extra_source, // @[Decoupled.scala:255:14]
output [1:0] io_deq_bits_extra_tlrr_extra_size // @[Decoupled.scala:255:14]
);
reg [94:0] ram; // @[Decoupled.scala:256:91]
reg full; // @[Decoupled.scala:259:27]
wire do_enq = ~full & io_enq_valid; // @[Decoupled.scala:51:35, :259:27, :286:19]
always @(posedge clock) begin // @[RegMapper.scala:71:32]
if (do_enq) // @[Decoupled.scala:51:35]
ram <= {io_enq_bits_extra_tlrr_extra_size, io_enq_bits_extra_tlrr_extra_source, io_enq_bits_mask, io_enq_bits_data, io_enq_bits_index, io_enq_bits_read}; // @[Decoupled.scala:256:91]
if (reset) // @[RegMapper.scala:71:32]
full <= 1'h0; // @[Decoupled.scala:259:27]
else if (~(do_enq == (io_deq_ready & full))) // @[Decoupled.scala:51:35, :259:27, :276:{15,27}, :277:16]
full <= do_enq; // @[Decoupled.scala:51:35, :259:27]
end // always @(posedge)
assign io_enq_ready = ~full;
assign io_deq_valid = full;
assign io_deq_bits_read = ram[0];
assign io_deq_bits_index = ram[9:1];
assign io_deq_bits_mask = ram[81:74];
assign io_deq_bits_extra_tlrr_extra_source = ram[92:82];
assign io_deq_bits_extra_tlrr_extra_size = ram[94:93];
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
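// Clarifying note: `visible` requires that, for every client on the edge, either the
// transaction's source ID does not belong to that client or the address falls inside
// one of that client's visibility address sets; legalizeFormatA/B/C use it to flag
// addresses that are illegal for the bank visibility of the issuing client.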
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn't check for acquire T vs acquire B; it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
// This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
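// Minimal sketch (illustrative addition, not part of the original monitor): the legacy
// tracker above is a one-hot-per-source bitmap, set on the first 'A' beat and cleared on
// the first matching 'D' beat, i.e. inflight' = (inflight | a_set) & ~d_clr. With four
// sources, accepting a request on source 2 while nothing completes takes 0b0001 -> 0b0101.
private def exampleInflightStep(inflight: Int, aSet: Int, dClr: Int): Int =
  (inflight | aSet) & ~dClr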
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 // opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
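// Illustrative sanity sketch (not part of the original monitor): size_to_numfullbits turns a
// count into that many low 1-bits, e.g. 3 -> 0b111. The pure-Scala model below mirrors the
// hardware expression (1.U << x) - 1.U and is checked once at elaboration time.
def size_to_numfullbits_model(x: Int): Int = (1 << x) - 1
require(size_to_numfullbits_model(3) == 0x7, "example: 3 full bits should be 0b111")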
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
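// Worked example (illustrative addition, not in the original monitor): the in-flight state
// above keeps one lane per source ID. A lane stores (opcode << 1) | 1, so bit 0 acts as a
// "set" flag and a_opcode_lookup recovers the opcode by shifting right by one; e.g. a Get
// (opcode 4) occupies a lane value of 0b1001. The tiny model below restates that packing.
private def exampleOpcodeLane(opcode: Int): Int = (opcode << 1) | 1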
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 // opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
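/** Usage sketch (illustrative addition, not in the original file): Str packs ASCII so text can
  * be emitted from printf-style debug hardware, e.g. Str("OK") elaborates to 0x4f4b.U(16.W),
  * and Str(x, 16) renders a UInt as hexadecimal characters.
  */
object StrExample {
  def hexText(x: UInt): UInt = Str(x, 16)
}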
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
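/** Usage sketch (illustrative addition, not in the original file): a 2-of-3 voter for
  * triplicated control signals, a common use of Majority for soft-error hardening.
  */
object MajorityExample {
  def vote3(a: Bool, b: Bool, c: Bool): Bool = Majority(Seq(a, b, c))
}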
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
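/** Usage sketch (illustrative addition, not in the original file): a threshold check such as
  * "at least two requesters hit" is cheaper with PopCountAtLeast than a full PopCount followed
  * by a compare. `hits` here is a hypothetical bit vector of hit indications.
  */
object PopCountAtLeastExample {
  def atLeastTwo(hits: UInt): Bool = PopCountAtLeast(hits, 2)
}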
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
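/** Worked example (illustrative addition, not in the original file): on an 8-byte beat,
  * MaskGen(0x4.U, 2.U, 8) describes a 4-byte access at offset 4 and covers byte lanes 4
  * through 7 (mask 0xf0). The helper below is a hypothetical wrapper showing the call shape.
  */
object MaskGenExample {
  def putMask(addrLo: UInt, lgSize: UInt): UInt = MaskGen(addrLo, lgSize, beatBytes = 8)
}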
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racey.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
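/** Usage sketch (illustrative addition, not in the original file): PlusArg elaborates a
  * plusarg_reader BlackBox, so it must be called from inside a Module. This hypothetical
  * module mirrors the timeout pattern used by the TileLink monitor above.
  */
class PlusArgExampleTimeout extends Module {
  val io = IO(new Bundle { val timedOut = Output(Bool()) })
  private val cycles = RegInit(0.U(32.W))
  cycles := cycles + 1.U
  private val limit = PlusArg("example_timeout", 0, "Example cycle limit; 0 disables the check.")
  io.timedOut := limit =/= 0.U && cycles >= limit
}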
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
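// Usage sketch (illustrative addition, not in the original file): addWrap/subWrap assume both
// operands are already < n, so they suit circular-queue pointers, e.g. for depth 5,
// (3.U).addWrap(4.U, 5) === 2.U. A hypothetical pointer increment:
def exampleQueuePtrIncr(ptr: UInt, depth: Int): UInt = ptr.addWrap(1.U, depth)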
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
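// Worked example (illustrative addition, not in the original file): UIntToOH1(2.U, 4) gives
// the "thermometer minus one" pattern 0b0011, and OH1ToOH(0b0011) recovers the true one-hot
// 0b0100, so the pair round-trips a count through its OH1 encoding:
def exampleOH1RoundTrip(x: UInt, width: Int): UInt = OH1ToOH(UIntToOH1(x, width))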
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
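// Worked example (illustrative addition, not in the original file): leftOR fills 1s upward and
// rightOR fills 1s downward, e.g. leftOR(0b00100) = 0b11100 and rightOR(0b00100) = 0b00111.
// A typical use is expanding a one-hot grant into a thermometer mask:
def exampleThermometerMask(grantOH: UInt): UInt = leftOR(grantOH)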
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
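// Usage sketch (illustrative addition, not in the original file): unlike Seq.groupBy, the
// result order follows first encounter, which keeps generated hardware stable across runs,
// e.g. groupByIntoSeq(Seq(3, -1, 4, -5))(_ >= 0) == Seq(true -> Seq(3, 4), false -> Seq(-1, -5)).
def exampleGroupBySign(xs: Seq[Int]): Seq[(Boolean, Seq[Int])] = groupByIntoSeq(xs)(_ >= 0)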
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
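/** Illustrative sketch (not part of the original file): adResponse maps each 'A' opcode to the
  * 'D' opcode expected in reply, e.g. adResponse(Get) is AccessAckData and
  * adResponse(PutFullData) is AccessAck; the monitor's responseMap tables mirror this.
  */
object TLMessagesExample {
  def expectedDOpcode(aOpcode: UInt): UInt = TLMessages.adResponse(aOpcode)
}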
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on an inwards path to) the global point of serialization.
* (B)ranch: the agent is on an outwards path from the Trunk and may hold a read-only copy.
* (N)one: the agent holds no permissions on (and no copy of) the data.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
// Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.diplomacy.{
AddressDecoder, AddressSet, BufferParams, DirectedBuffers, IdMap, IdMapEntry,
IdRange, RegionType, TransferSizes
}
import freechips.rocketchip.resources.{Resource, ResourceAddress, ResourcePermissions}
import freechips.rocketchip.util.{
AsyncQueueParams, BundleField, BundleFieldBase, BundleKeyBase,
CreditedDelay, groupByIntoSeq, RationalDirection, SimpleProduct
}
import scala.math.max
//These transfer sizes describe requests issued from masters on the A channel that will be responded to by slaves on the D channel
case class TLMasterToSlaveTransferSizes(
// Supports both Acquire+Release of the following two sizes:
acquireT: TransferSizes = TransferSizes.none,
acquireB: TransferSizes = TransferSizes.none,
arithmetic: TransferSizes = TransferSizes.none,
logical: TransferSizes = TransferSizes.none,
get: TransferSizes = TransferSizes.none,
putFull: TransferSizes = TransferSizes.none,
putPartial: TransferSizes = TransferSizes.none,
hint: TransferSizes = TransferSizes.none)
extends TLCommonTransferSizes {
def intersect(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes(
acquireT = acquireT .intersect(rhs.acquireT),
acquireB = acquireB .intersect(rhs.acquireB),
arithmetic = arithmetic.intersect(rhs.arithmetic),
logical = logical .intersect(rhs.logical),
get = get .intersect(rhs.get),
putFull = putFull .intersect(rhs.putFull),
putPartial = putPartial.intersect(rhs.putPartial),
hint = hint .intersect(rhs.hint))
def mincover(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes(
acquireT = acquireT .mincover(rhs.acquireT),
acquireB = acquireB .mincover(rhs.acquireB),
arithmetic = arithmetic.mincover(rhs.arithmetic),
logical = logical .mincover(rhs.logical),
get = get .mincover(rhs.get),
putFull = putFull .mincover(rhs.putFull),
putPartial = putPartial.mincover(rhs.putPartial),
hint = hint .mincover(rhs.hint))
// Reduce rendering to a simple yes/no per field
override def toString = {
def str(x: TransferSizes, flag: String) = if (x.none) "" else flag
def flags = Vector(
str(acquireT, "T"),
str(acquireB, "B"),
str(arithmetic, "A"),
str(logical, "L"),
str(get, "G"),
str(putFull, "F"),
str(putPartial, "P"),
str(hint, "H"))
flags.mkString
}
// Prints out the actual information in a user readable way
def infoString = {
s"""acquireT = ${acquireT}
|acquireB = ${acquireB}
|arithmetic = ${arithmetic}
|logical = ${logical}
|get = ${get}
|putFull = ${putFull}
|putPartial = ${putPartial}
|hint = ${hint}
|
|""".stripMargin
}
}
object TLMasterToSlaveTransferSizes {
def unknownEmits = TLMasterToSlaveTransferSizes(
acquireT = TransferSizes(1, 4096),
acquireB = TransferSizes(1, 4096),
arithmetic = TransferSizes(1, 4096),
logical = TransferSizes(1, 4096),
get = TransferSizes(1, 4096),
putFull = TransferSizes(1, 4096),
putPartial = TransferSizes(1, 4096),
hint = TransferSizes(1, 4096))
def unknownSupports = TLMasterToSlaveTransferSizes()
}
//These transfer sizes describe requests issued from slaves on the B channel that will be responded to by masters on the C channel
case class TLSlaveToMasterTransferSizes(
probe: TransferSizes = TransferSizes.none,
arithmetic: TransferSizes = TransferSizes.none,
logical: TransferSizes = TransferSizes.none,
get: TransferSizes = TransferSizes.none,
putFull: TransferSizes = TransferSizes.none,
putPartial: TransferSizes = TransferSizes.none,
hint: TransferSizes = TransferSizes.none
) extends TLCommonTransferSizes {
def intersect(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes(
probe = probe .intersect(rhs.probe),
arithmetic = arithmetic.intersect(rhs.arithmetic),
logical = logical .intersect(rhs.logical),
get = get .intersect(rhs.get),
putFull = putFull .intersect(rhs.putFull),
putPartial = putPartial.intersect(rhs.putPartial),
hint = hint .intersect(rhs.hint)
)
def mincover(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes(
probe = probe .mincover(rhs.probe),
arithmetic = arithmetic.mincover(rhs.arithmetic),
logical = logical .mincover(rhs.logical),
get = get .mincover(rhs.get),
putFull = putFull .mincover(rhs.putFull),
putPartial = putPartial.mincover(rhs.putPartial),
hint = hint .mincover(rhs.hint)
)
// Reduce rendering to a simple yes/no per field
override def toString = {
def str(x: TransferSizes, flag: String) = if (x.none) "" else flag
def flags = Vector(
str(probe, "P"),
str(arithmetic, "A"),
str(logical, "L"),
str(get, "G"),
str(putFull, "F"),
str(putPartial, "P"),
str(hint, "H"))
flags.mkString
}
// Prints out the actual information in a user readable way
def infoString = {
s"""probe = ${probe}
|arithmetic = ${arithmetic}
|logical = ${logical}
|get = ${get}
|putFull = ${putFull}
|putPartial = ${putPartial}
|hint = ${hint}
|
|""".stripMargin
}
}
object TLSlaveToMasterTransferSizes {
def unknownEmits = TLSlaveToMasterTransferSizes(
arithmetic = TransferSizes(1, 4096),
logical = TransferSizes(1, 4096),
get = TransferSizes(1, 4096),
putFull = TransferSizes(1, 4096),
putPartial = TransferSizes(1, 4096),
hint = TransferSizes(1, 4096),
probe = TransferSizes(1, 4096))
def unknownSupports = TLSlaveToMasterTransferSizes()
}
trait TLCommonTransferSizes {
def arithmetic: TransferSizes
def logical: TransferSizes
def get: TransferSizes
def putFull: TransferSizes
def putPartial: TransferSizes
def hint: TransferSizes
}
class TLSlaveParameters private(
val nodePath: Seq[BaseNode],
val resources: Seq[Resource],
setName: Option[String],
val address: Seq[AddressSet],
val regionType: RegionType.T,
val executable: Boolean,
val fifoId: Option[Int],
val supports: TLMasterToSlaveTransferSizes,
val emits: TLSlaveToMasterTransferSizes,
// By default, slaves are forbidden from issuing 'denied' responses (it prevents Fragmentation)
val alwaysGrantsT: Boolean, // typically only true for CacheCork'd read-write devices; dual: neverReleaseData
// If fifoId=Some, all accesses sent to the same fifoId are executed and ACK'd in FIFO order
// Note: you can only rely on this FIFO behaviour if your TLMasterParameters include requestFifo
val mayDenyGet: Boolean, // applies to: AccessAckData, GrantData
val mayDenyPut: Boolean) // applies to: AccessAck, Grant, HintAck
// ReleaseAck may NEVER be denied
extends SimpleProduct
{
def sortedAddress = address.sorted
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlaveParameters]
override def productPrefix = "TLSlaveParameters"
// We intentionally omit nodePath for equality testing / formatting
def productArity: Int = 11
def productElement(n: Int): Any = n match {
case 0 => name
case 1 => address
case 2 => resources
case 3 => regionType
case 4 => executable
case 5 => fifoId
case 6 => supports
case 7 => emits
case 8 => alwaysGrantsT
case 9 => mayDenyGet
case 10 => mayDenyPut
case _ => throw new IndexOutOfBoundsException(n.toString)
}
def supportsAcquireT: TransferSizes = supports.acquireT
def supportsAcquireB: TransferSizes = supports.acquireB
def supportsArithmetic: TransferSizes = supports.arithmetic
def supportsLogical: TransferSizes = supports.logical
def supportsGet: TransferSizes = supports.get
def supportsPutFull: TransferSizes = supports.putFull
def supportsPutPartial: TransferSizes = supports.putPartial
def supportsHint: TransferSizes = supports.hint
require (!address.isEmpty, "Address cannot be empty")
address.foreach { a => require (a.finite, "Address must be finite") }
address.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") }
require (supportsPutFull.contains(supportsPutPartial), s"PutFull($supportsPutFull) < PutPartial($supportsPutPartial)")
require (supportsPutFull.contains(supportsArithmetic), s"PutFull($supportsPutFull) < Arithmetic($supportsArithmetic)")
require (supportsPutFull.contains(supportsLogical), s"PutFull($supportsPutFull) < Logical($supportsLogical)")
require (supportsGet.contains(supportsArithmetic), s"Get($supportsGet) < Arithmetic($supportsArithmetic)")
require (supportsGet.contains(supportsLogical), s"Get($supportsGet) < Logical($supportsLogical)")
require (supportsAcquireB.contains(supportsAcquireT), s"AcquireB($supportsAcquireB) < AcquireT($supportsAcquireT)")
require (!alwaysGrantsT || supportsAcquireT, s"Must supportAcquireT if promising to always grantT")
// Make sure that the regionType agrees with the capabilities
require (!supportsAcquireB || regionType >= RegionType.UNCACHED) // acquire -> uncached, tracked, cached
require (regionType <= RegionType.UNCACHED || supportsAcquireB) // tracked, cached -> acquire
require (regionType != RegionType.UNCACHED || supportsGet) // uncached -> supportsGet
val name = setName.orElse(nodePath.lastOption.map(_.lazyModule.name)).getOrElse("disconnected")
val maxTransfer = List( // Largest supported transfer of all types
supportsAcquireT.max,
supportsAcquireB.max,
supportsArithmetic.max,
supportsLogical.max,
supportsGet.max,
supportsPutFull.max,
supportsPutPartial.max).max
val maxAddress = address.map(_.max).max
val minAlignment = address.map(_.alignment).min
// The device had better not support a transfer larger than its alignment
require (minAlignment >= maxTransfer, s"Bad $address: minAlignment ($minAlignment) must be >= maxTransfer ($maxTransfer)")
def toResource: ResourceAddress = {
ResourceAddress(address, ResourcePermissions(
r = supportsAcquireB || supportsGet,
w = supportsAcquireT || supportsPutFull,
x = executable,
c = supportsAcquireB,
a = supportsArithmetic && supportsLogical))
}
def findTreeViolation() = nodePath.find {
case _: MixedAdapterNode[_, _, _, _, _, _, _, _] => false
case _: SinkNode[_, _, _, _, _] => false
case node => node.inputs.size != 1
}
def isTree = findTreeViolation() == None
def infoString = {
s"""Slave Name = ${name}
|Slave Address = ${address}
|supports = ${supports.infoString}
|
|""".stripMargin
}
def v1copy(
address: Seq[AddressSet] = address,
resources: Seq[Resource] = resources,
regionType: RegionType.T = regionType,
executable: Boolean = executable,
nodePath: Seq[BaseNode] = nodePath,
supportsAcquireT: TransferSizes = supports.acquireT,
supportsAcquireB: TransferSizes = supports.acquireB,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint,
mayDenyGet: Boolean = mayDenyGet,
mayDenyPut: Boolean = mayDenyPut,
alwaysGrantsT: Boolean = alwaysGrantsT,
fifoId: Option[Int] = fifoId) =
{
new TLSlaveParameters(
setName = setName,
address = address,
resources = resources,
regionType = regionType,
executable = executable,
nodePath = nodePath,
supports = TLMasterToSlaveTransferSizes(
acquireT = supportsAcquireT,
acquireB = supportsAcquireB,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = emits,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut,
alwaysGrantsT = alwaysGrantsT,
fifoId = fifoId)
}
def v2copy(
nodePath: Seq[BaseNode] = nodePath,
resources: Seq[Resource] = resources,
name: Option[String] = setName,
address: Seq[AddressSet] = address,
regionType: RegionType.T = regionType,
executable: Boolean = executable,
fifoId: Option[Int] = fifoId,
supports: TLMasterToSlaveTransferSizes = supports,
emits: TLSlaveToMasterTransferSizes = emits,
alwaysGrantsT: Boolean = alwaysGrantsT,
mayDenyGet: Boolean = mayDenyGet,
mayDenyPut: Boolean = mayDenyPut) =
{
new TLSlaveParameters(
nodePath = nodePath,
resources = resources,
setName = name,
address = address,
regionType = regionType,
executable = executable,
fifoId = fifoId,
supports = supports,
emits = emits,
alwaysGrantsT = alwaysGrantsT,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut)
}
@deprecated("Use v1copy instead of copy","")
def copy(
address: Seq[AddressSet] = address,
resources: Seq[Resource] = resources,
regionType: RegionType.T = regionType,
executable: Boolean = executable,
nodePath: Seq[BaseNode] = nodePath,
supportsAcquireT: TransferSizes = supports.acquireT,
supportsAcquireB: TransferSizes = supports.acquireB,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint,
mayDenyGet: Boolean = mayDenyGet,
mayDenyPut: Boolean = mayDenyPut,
alwaysGrantsT: Boolean = alwaysGrantsT,
fifoId: Option[Int] = fifoId) =
{
v1copy(
address = address,
resources = resources,
regionType = regionType,
executable = executable,
nodePath = nodePath,
supportsAcquireT = supportsAcquireT,
supportsAcquireB = supportsAcquireB,
supportsArithmetic = supportsArithmetic,
supportsLogical = supportsLogical,
supportsGet = supportsGet,
supportsPutFull = supportsPutFull,
supportsPutPartial = supportsPutPartial,
supportsHint = supportsHint,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut,
alwaysGrantsT = alwaysGrantsT,
fifoId = fifoId)
}
}
object TLSlaveParameters {
def v1(
address: Seq[AddressSet],
resources: Seq[Resource] = Seq(),
regionType: RegionType.T = RegionType.GET_EFFECTS,
executable: Boolean = false,
nodePath: Seq[BaseNode] = Seq(),
supportsAcquireT: TransferSizes = TransferSizes.none,
supportsAcquireB: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none,
mayDenyGet: Boolean = false,
mayDenyPut: Boolean = false,
alwaysGrantsT: Boolean = false,
fifoId: Option[Int] = None) =
{
new TLSlaveParameters(
setName = None,
address = address,
resources = resources,
regionType = regionType,
executable = executable,
nodePath = nodePath,
supports = TLMasterToSlaveTransferSizes(
acquireT = supportsAcquireT,
acquireB = supportsAcquireB,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = TLSlaveToMasterTransferSizes.unknownEmits,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut,
alwaysGrantsT = alwaysGrantsT,
fifoId = fifoId)
}
def v2(
address: Seq[AddressSet],
nodePath: Seq[BaseNode] = Seq(),
resources: Seq[Resource] = Seq(),
name: Option[String] = None,
regionType: RegionType.T = RegionType.GET_EFFECTS,
executable: Boolean = false,
fifoId: Option[Int] = None,
supports: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownSupports,
emits: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownEmits,
alwaysGrantsT: Boolean = false,
mayDenyGet: Boolean = false,
mayDenyPut: Boolean = false) =
{
new TLSlaveParameters(
nodePath = nodePath,
resources = resources,
setName = name,
address = address,
regionType = regionType,
executable = executable,
fifoId = fifoId,
supports = supports,
emits = emits,
alwaysGrantsT = alwaysGrantsT,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut)
}
}
object TLManagerParameters {
@deprecated("Use TLSlaveParameters.v1 instead of TLManagerParameters","")
def apply(
address: Seq[AddressSet],
resources: Seq[Resource] = Seq(),
regionType: RegionType.T = RegionType.GET_EFFECTS,
executable: Boolean = false,
nodePath: Seq[BaseNode] = Seq(),
supportsAcquireT: TransferSizes = TransferSizes.none,
supportsAcquireB: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none,
mayDenyGet: Boolean = false,
mayDenyPut: Boolean = false,
alwaysGrantsT: Boolean = false,
fifoId: Option[Int] = None) =
TLSlaveParameters.v1(
address,
resources,
regionType,
executable,
nodePath,
supportsAcquireT,
supportsAcquireB,
supportsArithmetic,
supportsLogical,
supportsGet,
supportsPutFull,
supportsPutPartial,
supportsHint,
mayDenyGet,
mayDenyPut,
alwaysGrantsT,
fifoId,
)
}
case class TLChannelBeatBytes(a: Option[Int], b: Option[Int], c: Option[Int], d: Option[Int])
{
def members = Seq(a, b, c, d)
members.collect { case Some(beatBytes) =>
require (isPow2(beatBytes), "Data channel width must be a power of 2")
}
}
object TLChannelBeatBytes{
def apply(beatBytes: Int): TLChannelBeatBytes = TLChannelBeatBytes(
Some(beatBytes),
Some(beatBytes),
Some(beatBytes),
Some(beatBytes))
def apply(): TLChannelBeatBytes = TLChannelBeatBytes(
None,
None,
None,
None)
}
class TLSlavePortParameters private(
val slaves: Seq[TLSlaveParameters],
val channelBytes: TLChannelBeatBytes,
val endSinkId: Int,
val minLatency: Int,
val responseFields: Seq[BundleFieldBase],
val requestKeys: Seq[BundleKeyBase]) extends SimpleProduct
{
def sortedSlaves = slaves.sortBy(_.sortedAddress.head)
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlavePortParameters]
override def productPrefix = "TLSlavePortParameters"
def productArity: Int = 6
def productElement(n: Int): Any = n match {
case 0 => slaves
case 1 => channelBytes
case 2 => endSinkId
case 3 => minLatency
case 4 => responseFields
case 5 => requestKeys
case _ => throw new IndexOutOfBoundsException(n.toString)
}
require (!slaves.isEmpty, "Slave ports must have slaves")
require (endSinkId >= 0, "Sink ids cannot be negative")
require (minLatency >= 0, "Minimum required latency cannot be negative")
// Using this API implies you cannot handle mixed-width busses
def beatBytes = {
channelBytes.members.foreach { width =>
require (width.isDefined && width == channelBytes.a)
}
channelBytes.a.get
}
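  // Illustrative note (not from the original source): TLChannelBeatBytes(8) gives every channel an
  // 8-byte data width, so beatBytes returns 8; constructing the port with mixed channel widths would
  // make the require above fail, since this convenience accessor assumes a single, uniform bus width.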
// TODO this should be deprecated
def managers = slaves
def requireFifo(policy: TLFIFOFixer.Policy = TLFIFOFixer.allFIFO) = {
val relevant = slaves.filter(m => policy(m))
relevant.foreach { m =>
require(m.fifoId == relevant.head.fifoId, s"${m.name} had fifoId ${m.fifoId}, which was not homogeneous (${slaves.map(s => (s.name, s.fifoId))}) ")
}
}
// Bounds on required sizes
def maxAddress = slaves.map(_.maxAddress).max
def maxTransfer = slaves.map(_.maxTransfer).max
def mayDenyGet = slaves.exists(_.mayDenyGet)
def mayDenyPut = slaves.exists(_.mayDenyPut)
  // Diplomatically determined operation sizes emitted by all outward Slaves,
  // as opposed to the emits*Safe methods, which generate circuitry to check specific addresses
  val allEmitClaims = slaves.map(_.emits).reduce( _ intersect _)
  // Operation sizes emitted by at least one outward Slave,
  // as opposed to the emits*Safe methods, which generate circuitry to check specific addresses
  val anyEmitClaims = slaves.map(_.emits).reduce(_ mincover _)
  // Diplomatically determined operation sizes supported by all outward Slaves,
  // as opposed to the supports*Safe/Fast methods, which generate circuitry to check specific addresses
  val allSupportClaims = slaves.map(_.supports).reduce( _ intersect _)
val allSupportAcquireT = allSupportClaims.acquireT
val allSupportAcquireB = allSupportClaims.acquireB
val allSupportArithmetic = allSupportClaims.arithmetic
val allSupportLogical = allSupportClaims.logical
val allSupportGet = allSupportClaims.get
val allSupportPutFull = allSupportClaims.putFull
val allSupportPutPartial = allSupportClaims.putPartial
val allSupportHint = allSupportClaims.hint
  // Operation sizes supported by at least one outward Slave,
  // as opposed to the supports*Safe/Fast methods, which generate circuitry to check specific addresses
  val anySupportClaims = slaves.map(_.supports).reduce(_ mincover _)
val anySupportAcquireT = !anySupportClaims.acquireT.none
val anySupportAcquireB = !anySupportClaims.acquireB.none
val anySupportArithmetic = !anySupportClaims.arithmetic.none
val anySupportLogical = !anySupportClaims.logical.none
val anySupportGet = !anySupportClaims.get.none
val anySupportPutFull = !anySupportClaims.putFull.none
val anySupportPutPartial = !anySupportClaims.putPartial.none
val anySupportHint = !anySupportClaims.hint.none
// Supporting Acquire means being routable for GrantAck
require ((endSinkId == 0) == !anySupportAcquireB)
// These return Option[TLSlaveParameters] for your convenience
def find(address: BigInt) = slaves.find(_.address.exists(_.contains(address)))
// The safe version will check the entire address
def findSafe(address: UInt) = VecInit(sortedSlaves.map(_.address.map(_.contains(address)).reduce(_ || _)))
// The fast version assumes the address is valid (you probably want fastProperty instead of this function)
def findFast(address: UInt) = {
val routingMask = AddressDecoder(slaves.map(_.address))
VecInit(sortedSlaves.map(_.address.map(_.widen(~routingMask)).distinct.map(_.contains(address)).reduce(_ || _)))
}
// Compute the simplest AddressSets that decide a key
def fastPropertyGroup[K](p: TLSlaveParameters => K): Seq[(K, Seq[AddressSet])] = {
val groups = groupByIntoSeq(sortedSlaves.map(m => (p(m), m.address)))( _._1).map { case (k, vs) =>
k -> vs.flatMap(_._2)
}
val reductionMask = AddressDecoder(groups.map(_._2))
groups.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~reductionMask)).distinct) }
}
// Select a property
def fastProperty[K, D <: Data](address: UInt, p: TLSlaveParameters => K, d: K => D): D =
Mux1H(fastPropertyGroup(p).map { case (v, a) => (a.map(_.contains(address)).reduce(_||_), d(v)) })
// Note: returns the actual fifoId + 1 or 0 if None
def findFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.map(_+1).getOrElse(0), (i:Int) => i.U)
def hasFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.isDefined, (b:Boolean) => b.B)
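  // Sketch (hypothetical slaves, not from the original source): if one slave at 0x0-0xfff has
  // fifoId = Some(0) and another at 0x1000-0x1fff has fifoId = None, then findFifoIdFast(addr)
  // evaluates to 1.U over the first region and 0.U over the second, and hasFifoIdFast(addr) to
  // true.B / false.B respectively, using only the address bits that distinguish the two regions.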
// Does this Port manage this ID/address?
def containsSafe(address: UInt) = findSafe(address).reduce(_ || _)
private def addressHelper(
// setting safe to false indicates that all addresses are expected to be legal, which might reduce circuit complexity
safe: Boolean,
// member filters out the sizes being checked based on the opcode being emitted or supported
member: TLSlaveParameters => TransferSizes,
address: UInt,
lgSize: UInt,
// range provides a limit on the sizes that are expected to be evaluated, which might reduce circuit complexity
range: Option[TransferSizes]): Bool = {
// trim reduces circuit complexity by intersecting checked sizes with the range argument
def trim(x: TransferSizes) = range.map(_.intersect(x)).getOrElse(x)
    // groupBy returns an unordered map; convert back to a Seq and sort the result for determinism.
    // groupByIntoSeq deterministically groups the slaves by the trimmed transfer sizes they support
    // for the requested operation: `member` selects which kind of transfer is being checked, and
    // `trim` intersects each slave's sizes with the `range` argument so that only sizes of interest
    // are considered. Instead of returning a list of slaves per group, supportCases maps each transfer
    // size to the address sets that emit or support that size; one size may cover several address ranges.
    // `safe` is a trade-off between checking all possible addresses versus only the addresses known to
    // have supported sizes: the safe version is a more expensive circuit but gives the right answer
    // even for an illegal address, while the fast version is cheaper but presumes the address is legal
    // and may answer incorrectly otherwise.
val supportCases = groupByIntoSeq(slaves)(m => trim(member(m))).map { case (k: TransferSizes, vs: Seq[TLSlaveParameters]) =>
k -> vs.flatMap(_.address)
}
// safe produces a circuit that compares against all possible addresses,
// whereas fast presumes that the address is legal but uses an efficient address decoder
val mask = if (safe) ~BigInt(0) else AddressDecoder(supportCases.map(_._2))
    // Simplified creates the most concise possible representation of each case's address sets based on the mask.
val simplified = supportCases.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~mask)).distinct) }
simplified.map { case (s, a) =>
      // For each case: either the `range` argument is exactly this size (making the size check
      // statically true) or the dynamic size of the operation must lie within s.
      // We return an or-reduction of all the cases, checking whether any contains both the dynamic size and dynamic address on the wire.
((Some(s) == range).B || s.containsLg(lgSize)) &&
a.map(_.contains(address)).reduce(_||_)
}.foldLeft(false.B)(_||_)
}
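  // Worked sketch (hypothetical slaves, not from the original source): with one slave supporting Get
  // for TransferSizes(1, 8) at AddressSet(0x0, 0xfff) and another for TransferSizes(1, 64) at
  // AddressSet(0x1000, 0xfff), supportCases for _.supports.get becomes
  //   Seq(TransferSizes(1, 8)  -> Seq(AddressSet(0x0, 0xfff)),
  //       TransferSizes(1, 64) -> Seq(AddressSet(0x1000, 0xfff)))
  // so supportsGetSafe(addr, lgSize) reduces to an OR of two terms, each ANDing a containsLg(lgSize)
  // size check with a contains(addr) address check; the Fast variant may decode only the bits that
  // separate the two regions (here, address bit 12).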
def supportsAcquireTSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireT, address, lgSize, range)
def supportsAcquireBSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireB, address, lgSize, range)
def supportsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.arithmetic, address, lgSize, range)
def supportsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.logical, address, lgSize, range)
def supportsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.get, address, lgSize, range)
def supportsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putFull, address, lgSize, range)
def supportsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putPartial, address, lgSize, range)
def supportsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.hint, address, lgSize, range)
def supportsAcquireTFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireT, address, lgSize, range)
def supportsAcquireBFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireB, address, lgSize, range)
def supportsArithmeticFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.arithmetic, address, lgSize, range)
def supportsLogicalFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.logical, address, lgSize, range)
def supportsGetFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.get, address, lgSize, range)
def supportsPutFullFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.putFull, address, lgSize, range)
def supportsPutPartialFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.putPartial, address, lgSize, range)
def supportsHintFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.hint, address, lgSize, range)
def emitsProbeSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.probe, address, lgSize, range)
def emitsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.arithmetic, address, lgSize, range)
def emitsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.logical, address, lgSize, range)
def emitsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.get, address, lgSize, range)
def emitsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putFull, address, lgSize, range)
def emitsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putPartial, address, lgSize, range)
def emitsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.hint, address, lgSize, range)
def findTreeViolation() = slaves.flatMap(_.findTreeViolation()).headOption
def isTree = !slaves.exists(!_.isTree)
def infoString = "Slave Port Beatbytes = " + beatBytes + "\n" + "Slave Port MinLatency = " + minLatency + "\n\n" + slaves.map(_.infoString).mkString
def v1copy(
managers: Seq[TLSlaveParameters] = slaves,
beatBytes: Int = -1,
endSinkId: Int = endSinkId,
minLatency: Int = minLatency,
responseFields: Seq[BundleFieldBase] = responseFields,
requestKeys: Seq[BundleKeyBase] = requestKeys) =
{
new TLSlavePortParameters(
slaves = managers,
channelBytes = if (beatBytes != -1) TLChannelBeatBytes(beatBytes) else channelBytes,
endSinkId = endSinkId,
minLatency = minLatency,
responseFields = responseFields,
requestKeys = requestKeys)
}
def v2copy(
slaves: Seq[TLSlaveParameters] = slaves,
channelBytes: TLChannelBeatBytes = channelBytes,
endSinkId: Int = endSinkId,
minLatency: Int = minLatency,
responseFields: Seq[BundleFieldBase] = responseFields,
requestKeys: Seq[BundleKeyBase] = requestKeys) =
{
new TLSlavePortParameters(
slaves = slaves,
channelBytes = channelBytes,
endSinkId = endSinkId,
minLatency = minLatency,
responseFields = responseFields,
requestKeys = requestKeys)
}
@deprecated("Use v1copy instead of copy","")
def copy(
managers: Seq[TLSlaveParameters] = slaves,
beatBytes: Int = -1,
endSinkId: Int = endSinkId,
minLatency: Int = minLatency,
responseFields: Seq[BundleFieldBase] = responseFields,
requestKeys: Seq[BundleKeyBase] = requestKeys) =
{
v1copy(
managers,
beatBytes,
endSinkId,
minLatency,
responseFields,
requestKeys)
}
}
object TLSlavePortParameters {
def v1(
managers: Seq[TLSlaveParameters],
beatBytes: Int,
endSinkId: Int = 0,
minLatency: Int = 0,
responseFields: Seq[BundleFieldBase] = Nil,
requestKeys: Seq[BundleKeyBase] = Nil) =
{
new TLSlavePortParameters(
slaves = managers,
channelBytes = TLChannelBeatBytes(beatBytes),
endSinkId = endSinkId,
minLatency = minLatency,
responseFields = responseFields,
requestKeys = requestKeys)
}
}
object TLManagerPortParameters {
@deprecated("Use TLSlavePortParameters.v1 instead of TLManagerPortParameters","")
def apply(
managers: Seq[TLSlaveParameters],
beatBytes: Int,
endSinkId: Int = 0,
minLatency: Int = 0,
responseFields: Seq[BundleFieldBase] = Nil,
requestKeys: Seq[BundleKeyBase] = Nil) =
{
TLSlavePortParameters.v1(
managers,
beatBytes,
endSinkId,
minLatency,
responseFields,
requestKeys)
}
}
class TLMasterParameters private(
val nodePath: Seq[BaseNode],
val resources: Seq[Resource],
val name: String,
val visibility: Seq[AddressSet],
val unusedRegionTypes: Set[RegionType.T],
val executesOnly: Boolean,
val requestFifo: Boolean, // only a request, not a requirement. applies to A, not C.
val supports: TLSlaveToMasterTransferSizes,
val emits: TLMasterToSlaveTransferSizes,
val neverReleasesData: Boolean,
val sourceId: IdRange) extends SimpleProduct
{
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterParameters]
override def productPrefix = "TLMasterParameters"
// We intentionally omit nodePath for equality testing / formatting
def productArity: Int = 10
def productElement(n: Int): Any = n match {
case 0 => name
case 1 => sourceId
case 2 => resources
case 3 => visibility
case 4 => unusedRegionTypes
case 5 => executesOnly
case 6 => requestFifo
case 7 => supports
case 8 => emits
case 9 => neverReleasesData
case _ => throw new IndexOutOfBoundsException(n.toString)
}
require (!sourceId.isEmpty)
require (!visibility.isEmpty)
require (supports.putFull.contains(supports.putPartial))
// We only support these operations if we support Probe (ie: we're a cache)
require (supports.probe.contains(supports.arithmetic))
require (supports.probe.contains(supports.logical))
require (supports.probe.contains(supports.get))
require (supports.probe.contains(supports.putFull))
require (supports.probe.contains(supports.putPartial))
require (supports.probe.contains(supports.hint))
visibility.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") }
val maxTransfer = List(
supports.probe.max,
supports.arithmetic.max,
supports.logical.max,
supports.get.max,
supports.putFull.max,
supports.putPartial.max).max
def infoString = {
s"""Master Name = ${name}
|visibility = ${visibility}
|emits = ${emits.infoString}
|sourceId = ${sourceId}
|
|""".stripMargin
}
def v1copy(
name: String = name,
sourceId: IdRange = sourceId,
nodePath: Seq[BaseNode] = nodePath,
requestFifo: Boolean = requestFifo,
visibility: Seq[AddressSet] = visibility,
supportsProbe: TransferSizes = supports.probe,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = this.resources,
name = name,
visibility = visibility,
unusedRegionTypes = this.unusedRegionTypes,
executesOnly = this.executesOnly,
requestFifo = requestFifo,
supports = TLSlaveToMasterTransferSizes(
probe = supportsProbe,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = this.emits,
neverReleasesData = this.neverReleasesData,
sourceId = sourceId)
}
def v2copy(
nodePath: Seq[BaseNode] = nodePath,
resources: Seq[Resource] = resources,
name: String = name,
visibility: Seq[AddressSet] = visibility,
unusedRegionTypes: Set[RegionType.T] = unusedRegionTypes,
executesOnly: Boolean = executesOnly,
requestFifo: Boolean = requestFifo,
supports: TLSlaveToMasterTransferSizes = supports,
emits: TLMasterToSlaveTransferSizes = emits,
neverReleasesData: Boolean = neverReleasesData,
sourceId: IdRange = sourceId) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = resources,
name = name,
visibility = visibility,
unusedRegionTypes = unusedRegionTypes,
executesOnly = executesOnly,
requestFifo = requestFifo,
supports = supports,
emits = emits,
neverReleasesData = neverReleasesData,
sourceId = sourceId)
}
@deprecated("Use v1copy instead of copy","")
def copy(
name: String = name,
sourceId: IdRange = sourceId,
nodePath: Seq[BaseNode] = nodePath,
requestFifo: Boolean = requestFifo,
visibility: Seq[AddressSet] = visibility,
supportsProbe: TransferSizes = supports.probe,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint) =
{
v1copy(
name = name,
sourceId = sourceId,
nodePath = nodePath,
requestFifo = requestFifo,
visibility = visibility,
supportsProbe = supportsProbe,
supportsArithmetic = supportsArithmetic,
supportsLogical = supportsLogical,
supportsGet = supportsGet,
supportsPutFull = supportsPutFull,
supportsPutPartial = supportsPutPartial,
supportsHint = supportsHint)
}
}
object TLMasterParameters {
def v1(
name: String,
sourceId: IdRange = IdRange(0,1),
nodePath: Seq[BaseNode] = Seq(),
requestFifo: Boolean = false,
visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)),
supportsProbe: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = Nil,
name = name,
visibility = visibility,
unusedRegionTypes = Set(),
executesOnly = false,
requestFifo = requestFifo,
supports = TLSlaveToMasterTransferSizes(
probe = supportsProbe,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = TLMasterToSlaveTransferSizes.unknownEmits,
neverReleasesData = false,
sourceId = sourceId)
}
def v2(
nodePath: Seq[BaseNode] = Seq(),
resources: Seq[Resource] = Nil,
name: String,
visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)),
unusedRegionTypes: Set[RegionType.T] = Set(),
executesOnly: Boolean = false,
requestFifo: Boolean = false,
supports: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownSupports,
emits: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownEmits,
neverReleasesData: Boolean = false,
sourceId: IdRange = IdRange(0,1)) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = resources,
name = name,
visibility = visibility,
unusedRegionTypes = unusedRegionTypes,
executesOnly = executesOnly,
requestFifo = requestFifo,
supports = supports,
emits = emits,
neverReleasesData = neverReleasesData,
sourceId = sourceId)
}
}
object TLClientParameters {
@deprecated("Use TLMasterParameters.v1 instead of TLClientParameters","")
def apply(
name: String,
sourceId: IdRange = IdRange(0,1),
nodePath: Seq[BaseNode] = Seq(),
requestFifo: Boolean = false,
visibility: Seq[AddressSet] = Seq(AddressSet.everything),
supportsProbe: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none) =
{
TLMasterParameters.v1(
name = name,
sourceId = sourceId,
nodePath = nodePath,
requestFifo = requestFifo,
visibility = visibility,
supportsProbe = supportsProbe,
supportsArithmetic = supportsArithmetic,
supportsLogical = supportsLogical,
supportsGet = supportsGet,
supportsPutFull = supportsPutFull,
supportsPutPartial = supportsPutPartial,
supportsHint = supportsHint)
}
}
class TLMasterPortParameters private(
val masters: Seq[TLMasterParameters],
val channelBytes: TLChannelBeatBytes,
val minLatency: Int,
val echoFields: Seq[BundleFieldBase],
val requestFields: Seq[BundleFieldBase],
val responseKeys: Seq[BundleKeyBase]) extends SimpleProduct
{
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterPortParameters]
override def productPrefix = "TLMasterPortParameters"
def productArity: Int = 6
def productElement(n: Int): Any = n match {
case 0 => masters
case 1 => channelBytes
case 2 => minLatency
case 3 => echoFields
case 4 => requestFields
case 5 => responseKeys
case _ => throw new IndexOutOfBoundsException(n.toString)
}
require (!masters.isEmpty)
require (minLatency >= 0)
def clients = masters
// Require disjoint ranges for Ids
IdRange.overlaps(masters.map(_.sourceId)).foreach { case (x, y) =>
require (!x.overlaps(y), s"TLClientParameters.sourceId ${x} overlaps ${y}")
}
// Bounds on required sizes
def endSourceId = masters.map(_.sourceId.end).max
def maxTransfer = masters.map(_.maxTransfer).max
// The unused sources < endSourceId
def unusedSources: Seq[Int] = {
val usedSources = masters.map(_.sourceId).sortBy(_.start)
((Seq(0) ++ usedSources.map(_.end)) zip usedSources.map(_.start)) flatMap { case (end, start) =>
end until start
}
}
  // Diplomatically determined operation sizes emitted by all inward Masters,
  // as opposed to the per-source-ID emits* methods, which generate circuitry to check specific source IDs
  val allEmitClaims = masters.map(_.emits).reduce( _ intersect _)
  // Diplomatically determined operation sizes emitted by at least one inward Master,
  // as opposed to the per-source-ID emits* methods, which generate circuitry to check specific source IDs
  val anyEmitClaims = masters.map(_.emits).reduce(_ mincover _)
  // Diplomatically determined operation sizes supported by all inward Masters,
  // as opposed to the per-source-ID supports* methods, which generate circuitry to check specific source IDs
val allSupportProbe = masters.map(_.supports.probe) .reduce(_ intersect _)
val allSupportArithmetic = masters.map(_.supports.arithmetic).reduce(_ intersect _)
val allSupportLogical = masters.map(_.supports.logical) .reduce(_ intersect _)
val allSupportGet = masters.map(_.supports.get) .reduce(_ intersect _)
val allSupportPutFull = masters.map(_.supports.putFull) .reduce(_ intersect _)
val allSupportPutPartial = masters.map(_.supports.putPartial).reduce(_ intersect _)
val allSupportHint = masters.map(_.supports.hint) .reduce(_ intersect _)
  // Diplomatically determined operation sizes supported by at least one Master,
  // as opposed to the per-source-ID supports* methods, which generate circuitry to check specific source IDs
val anySupportProbe = masters.map(!_.supports.probe.none) .reduce(_ || _)
val anySupportArithmetic = masters.map(!_.supports.arithmetic.none).reduce(_ || _)
val anySupportLogical = masters.map(!_.supports.logical.none) .reduce(_ || _)
val anySupportGet = masters.map(!_.supports.get.none) .reduce(_ || _)
val anySupportPutFull = masters.map(!_.supports.putFull.none) .reduce(_ || _)
val anySupportPutPartial = masters.map(!_.supports.putPartial.none).reduce(_ || _)
val anySupportHint = masters.map(!_.supports.hint.none) .reduce(_ || _)
// These return Option[TLMasterParameters] for your convenience
def find(id: Int) = masters.find(_.sourceId.contains(id))
// Synthesizable lookup methods
def find(id: UInt) = VecInit(masters.map(_.sourceId.contains(id)))
def contains(id: UInt) = find(id).reduce(_ || _)
def requestFifo(id: UInt) = Mux1H(find(id), masters.map(c => c.requestFifo.B))
  // Synthesizable check of whether (id, size) is supported by the master's (client's) diplomatic parameters
  private def sourceIdHelper(member: TLMasterParameters => TransferSizes)(id: UInt, lgSize: UInt) = {
    val allSame = masters.map(member(_) == member(masters(0))).reduce(_ && _)
    // This if statement is a coarse version of the groupBy in sourceIdHelper2;
    // the allSame branch handles the case where there is only one group.
    if (allSame) member(masters(0)).containsLg(lgSize) else {
      // Find the master associated with the ID and return whether that master can receive a transaction of lgSize
Mux1H(find(id), masters.map(member(_).containsLg(lgSize)))
}
}
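  // Sketch (hypothetical masters, not from the original source): if every master reports
  // supports.get = TransferSizes(1, 64), then supportsGet(id, lgSize) collapses to the single
  // comparison TransferSizes(1, 64).containsLg(lgSize) with no Mux1H over source IDs; only when
  // the masters disagree does the per-ID one-hot mux get generated.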
// Check for support of a given operation at a specific id
val supportsProbe = sourceIdHelper(_.supports.probe) _
val supportsArithmetic = sourceIdHelper(_.supports.arithmetic) _
val supportsLogical = sourceIdHelper(_.supports.logical) _
val supportsGet = sourceIdHelper(_.supports.get) _
val supportsPutFull = sourceIdHelper(_.supports.putFull) _
val supportsPutPartial = sourceIdHelper(_.supports.putPartial) _
val supportsHint = sourceIdHelper(_.supports.hint) _
// TODO: Merge sourceIdHelper2 with sourceIdHelper
private def sourceIdHelper2(
member: TLMasterParameters => TransferSizes,
sourceId: UInt,
lgSize: UInt): Bool = {
// Because sourceIds are uniquely owned by each master, we use them to group the
// cases that have to be checked.
val emitCases = groupByIntoSeq(masters)(m => member(m)).map { case (k, vs) =>
k -> vs.map(_.sourceId)
}
emitCases.map { case (s, a) =>
(s.containsLg(lgSize)) &&
a.map(_.contains(sourceId)).reduce(_||_)
}.foldLeft(false.B)(_||_)
}
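  // Sketch (hypothetical masters, not from the original source): masters with sourceId IdRange(0, 4)
  // emitting Get for TransferSizes(1, 8) and IdRange(4, 8) for TransferSizes(1, 64) yield emitCases
  //   Seq(TransferSizes(1, 8) -> Seq(IdRange(0, 4)), TransferSizes(1, 64) -> Seq(IdRange(4, 8)))
  // so emitsGet(sourceId, lgSize) is the OR of two (size-contains && id-contains) terms.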
// Check for emit of a given operation at a specific id
def emitsAcquireT (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireT, sourceId, lgSize)
def emitsAcquireB (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireB, sourceId, lgSize)
def emitsArithmetic(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.arithmetic, sourceId, lgSize)
def emitsLogical (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.logical, sourceId, lgSize)
def emitsGet (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.get, sourceId, lgSize)
def emitsPutFull (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putFull, sourceId, lgSize)
def emitsPutPartial(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putPartial, sourceId, lgSize)
def emitsHint (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.hint, sourceId, lgSize)
def infoString = masters.map(_.infoString).mkString
def v1copy(
clients: Seq[TLMasterParameters] = masters,
minLatency: Int = minLatency,
echoFields: Seq[BundleFieldBase] = echoFields,
requestFields: Seq[BundleFieldBase] = requestFields,
responseKeys: Seq[BundleKeyBase] = responseKeys) =
{
new TLMasterPortParameters(
masters = clients,
channelBytes = channelBytes,
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
def v2copy(
masters: Seq[TLMasterParameters] = masters,
channelBytes: TLChannelBeatBytes = channelBytes,
minLatency: Int = minLatency,
echoFields: Seq[BundleFieldBase] = echoFields,
requestFields: Seq[BundleFieldBase] = requestFields,
responseKeys: Seq[BundleKeyBase] = responseKeys) =
{
new TLMasterPortParameters(
masters = masters,
channelBytes = channelBytes,
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
@deprecated("Use v1copy instead of copy","")
def copy(
clients: Seq[TLMasterParameters] = masters,
minLatency: Int = minLatency,
echoFields: Seq[BundleFieldBase] = echoFields,
requestFields: Seq[BundleFieldBase] = requestFields,
responseKeys: Seq[BundleKeyBase] = responseKeys) =
{
v1copy(
clients,
minLatency,
echoFields,
requestFields,
responseKeys)
}
}
object TLClientPortParameters {
@deprecated("Use TLMasterPortParameters.v1 instead of TLClientPortParameters","")
def apply(
clients: Seq[TLMasterParameters],
minLatency: Int = 0,
echoFields: Seq[BundleFieldBase] = Nil,
requestFields: Seq[BundleFieldBase] = Nil,
responseKeys: Seq[BundleKeyBase] = Nil) =
{
TLMasterPortParameters.v1(
clients,
minLatency,
echoFields,
requestFields,
responseKeys)
}
}
object TLMasterPortParameters {
def v1(
clients: Seq[TLMasterParameters],
minLatency: Int = 0,
echoFields: Seq[BundleFieldBase] = Nil,
requestFields: Seq[BundleFieldBase] = Nil,
responseKeys: Seq[BundleKeyBase] = Nil) =
{
new TLMasterPortParameters(
masters = clients,
channelBytes = TLChannelBeatBytes(),
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
def v2(
masters: Seq[TLMasterParameters],
channelBytes: TLChannelBeatBytes = TLChannelBeatBytes(),
minLatency: Int = 0,
echoFields: Seq[BundleFieldBase] = Nil,
requestFields: Seq[BundleFieldBase] = Nil,
responseKeys: Seq[BundleKeyBase] = Nil) =
{
new TLMasterPortParameters(
masters = masters,
channelBytes = channelBytes,
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
}
case class TLBundleParameters(
addressBits: Int,
dataBits: Int,
sourceBits: Int,
sinkBits: Int,
sizeBits: Int,
echoFields: Seq[BundleFieldBase],
requestFields: Seq[BundleFieldBase],
responseFields: Seq[BundleFieldBase],
hasBCE: Boolean)
{
// Chisel has issues with 0-width wires
require (addressBits >= 1)
require (dataBits >= 8)
require (sourceBits >= 1)
require (sinkBits >= 1)
require (sizeBits >= 1)
require (isPow2(dataBits))
echoFields.foreach { f => require (f.key.isControl, s"${f} is not a legal echo field") }
val addrLoBits = log2Up(dataBits/8)
// Used to uniquify bus IP names
def shortName = s"a${addressBits}d${dataBits}s${sourceBits}k${sinkBits}z${sizeBits}" + (if (hasBCE) "c" else "u")
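  // For example (illustration only): addressBits = 32, dataBits = 64, sourceBits = 4, sinkBits = 1,
  // sizeBits = 3 with hasBCE = false renders as "a32d64s4k1z3u"; the same widths with hasBCE = true
  // render as "a32d64s4k1z3c".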
def union(x: TLBundleParameters) =
TLBundleParameters(
max(addressBits, x.addressBits),
max(dataBits, x.dataBits),
max(sourceBits, x.sourceBits),
max(sinkBits, x.sinkBits),
max(sizeBits, x.sizeBits),
echoFields = BundleField.union(echoFields ++ x.echoFields),
requestFields = BundleField.union(requestFields ++ x.requestFields),
responseFields = BundleField.union(responseFields ++ x.responseFields),
hasBCE || x.hasBCE)
}
object TLBundleParameters
{
val emptyBundleParams = TLBundleParameters(
addressBits = 1,
dataBits = 8,
sourceBits = 1,
sinkBits = 1,
sizeBits = 1,
echoFields = Nil,
requestFields = Nil,
responseFields = Nil,
hasBCE = false)
def union(x: Seq[TLBundleParameters]) = x.foldLeft(emptyBundleParams)((x,y) => x.union(y))
def apply(master: TLMasterPortParameters, slave: TLSlavePortParameters) =
new TLBundleParameters(
addressBits = log2Up(slave.maxAddress + 1),
dataBits = slave.beatBytes * 8,
sourceBits = log2Up(master.endSourceId),
sinkBits = log2Up(slave.endSinkId),
sizeBits = log2Up(log2Ceil(max(master.maxTransfer, slave.maxTransfer))+1),
echoFields = master.echoFields,
requestFields = BundleField.accept(master.requestFields, slave.requestKeys),
responseFields = BundleField.accept(slave.responseFields, master.responseKeys),
hasBCE = master.anySupportProbe && slave.anySupportAcquireB)
}
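// Worked sketch (hypothetical port parameters, not from the original source): a slave port with
// maxAddress = 0xffff, beatBytes = 8, endSinkId = 1 facing a master port with endSourceId = 4 and a
// 64-byte maxTransfer yields addressBits = log2Up(0x10000) = 16, dataBits = 64, sourceBits = 2,
// sinkBits = 1, and sizeBits = log2Up(log2Ceil(64) + 1) = 3; hasBCE is true only if some master
// supports Probe and some slave supports AcquireB.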
case class TLEdgeParameters(
master: TLMasterPortParameters,
slave: TLSlavePortParameters,
params: Parameters,
sourceInfo: SourceInfo) extends FormatEdge
{
// legacy names:
def manager = slave
def client = master
val maxTransfer = max(master.maxTransfer, slave.maxTransfer)
val maxLgSize = log2Ceil(maxTransfer)
// Sanity check the link...
require (maxTransfer >= slave.beatBytes, s"Link's max transfer (${maxTransfer}) < ${slave.slaves.map(_.name)}'s beatBytes (${slave.beatBytes})")
def diplomaticClaimsMasterToSlave = master.anyEmitClaims.intersect(slave.anySupportClaims)
val bundle = TLBundleParameters(master, slave)
def formatEdge = master.infoString + "\n" + slave.infoString
}
case class TLCreditedDelay(
a: CreditedDelay,
b: CreditedDelay,
c: CreditedDelay,
d: CreditedDelay,
e: CreditedDelay)
{
def + (that: TLCreditedDelay): TLCreditedDelay = TLCreditedDelay(
a = a + that.a,
b = b + that.b,
c = c + that.c,
d = d + that.d,
e = e + that.e)
override def toString = s"(${a}, ${b}, ${c}, ${d}, ${e})"
}
object TLCreditedDelay {
def apply(delay: CreditedDelay): TLCreditedDelay = apply(delay, delay.flip, delay, delay.flip, delay)
}
case class TLCreditedManagerPortParameters(delay: TLCreditedDelay, base: TLSlavePortParameters) {def infoString = base.infoString}
case class TLCreditedClientPortParameters(delay: TLCreditedDelay, base: TLMasterPortParameters) {def infoString = base.infoString}
case class TLCreditedEdgeParameters(client: TLCreditedClientPortParameters, manager: TLCreditedManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge
{
val delay = client.delay + manager.delay
val bundle = TLBundleParameters(client.base, manager.base)
def formatEdge = client.infoString + "\n" + manager.infoString
}
case class TLAsyncManagerPortParameters(async: AsyncQueueParams, base: TLSlavePortParameters) {def infoString = base.infoString}
case class TLAsyncClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString}
case class TLAsyncBundleParameters(async: AsyncQueueParams, base: TLBundleParameters)
case class TLAsyncEdgeParameters(client: TLAsyncClientPortParameters, manager: TLAsyncManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge
{
val bundle = TLAsyncBundleParameters(manager.async, TLBundleParameters(client.base, manager.base))
def formatEdge = client.infoString + "\n" + manager.infoString
}
case class TLRationalManagerPortParameters(direction: RationalDirection, base: TLSlavePortParameters) {def infoString = base.infoString}
case class TLRationalClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString}
case class TLRationalEdgeParameters(client: TLRationalClientPortParameters, manager: TLRationalManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge
{
val bundle = TLBundleParameters(client.base, manager.base)
def formatEdge = client.infoString + "\n" + manager.infoString
}
// To be unified, devices must agree on all of these terms
case class ManagerUnificationKey(
resources: Seq[Resource],
regionType: RegionType.T,
executable: Boolean,
supportsAcquireT: TransferSizes,
supportsAcquireB: TransferSizes,
supportsArithmetic: TransferSizes,
supportsLogical: TransferSizes,
supportsGet: TransferSizes,
supportsPutFull: TransferSizes,
supportsPutPartial: TransferSizes,
supportsHint: TransferSizes)
object ManagerUnificationKey
{
def apply(x: TLSlaveParameters): ManagerUnificationKey = ManagerUnificationKey(
resources = x.resources,
regionType = x.regionType,
executable = x.executable,
supportsAcquireT = x.supportsAcquireT,
supportsAcquireB = x.supportsAcquireB,
supportsArithmetic = x.supportsArithmetic,
supportsLogical = x.supportsLogical,
supportsGet = x.supportsGet,
supportsPutFull = x.supportsPutFull,
supportsPutPartial = x.supportsPutPartial,
supportsHint = x.supportsHint)
}
object ManagerUnification
{
def apply(slaves: Seq[TLSlaveParameters]): List[TLSlaveParameters] = {
slaves.groupBy(ManagerUnificationKey.apply).values.map { seq =>
val agree = seq.forall(_.fifoId == seq.head.fifoId)
seq(0).v1copy(
address = AddressSet.unify(seq.flatMap(_.address)),
fifoId = if (agree) seq(0).fifoId else None)
}.toList
}
}
case class TLBufferParams(
a: BufferParams = BufferParams.none,
b: BufferParams = BufferParams.none,
c: BufferParams = BufferParams.none,
d: BufferParams = BufferParams.none,
e: BufferParams = BufferParams.none
) extends DirectedBuffers[TLBufferParams] {
def copyIn(x: BufferParams) = this.copy(b = x, d = x)
def copyOut(x: BufferParams) = this.copy(a = x, c = x, e = x)
def copyInOut(x: BufferParams) = this.copyIn(x).copyOut(x)
}
/** Pretty printing of TL source id maps */
class TLSourceIdMap(tl: TLMasterPortParameters) extends IdMap[TLSourceIdMapEntry] {
private val tlDigits = String.valueOf(tl.endSourceId-1).length()
protected val fmt = s"\t[%${tlDigits}d, %${tlDigits}d) %s%s%s"
private val sorted = tl.masters.sortBy(_.sourceId)
val mapping: Seq[TLSourceIdMapEntry] = sorted.map { case c =>
TLSourceIdMapEntry(c.sourceId, c.name, c.supports.probe, c.requestFifo)
}
}
case class TLSourceIdMapEntry(tlId: IdRange, name: String, isCache: Boolean, requestFifo: Boolean)
extends IdMapEntry
{
val from = tlId
val to = tlId
val maxTransactionsInFlight = Some(tlId.size)
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
      // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
      // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
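  // Worked sketch (illustration only, assuming beatBytes = 8): a PutFullData with size = 5 (32 bytes)
  // has numBeats1 = 3, so the counter steps 0 -> 3 -> 2 -> 1 across four fired beats; `first` is true
  // on the first beat, `last` on the fourth, and `count` advances 0, 1, 2, 3. A message without a
  // data payload always has beats1 = 0, making it a single-beat (first && last) transfer.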
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
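  // For example (illustration only): a Get or a PREFETCH_READ Hint never needs T permissions, while
  // Put*, Arithmetic/LogicalData, a PREFETCH_WRITE Hint, and Acquires asking for NtoT or BtoT do;
  // an AcquireBlock with NtoB leaves needT false.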
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
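// Edge helpers used on the client (master) side: constructors for the messages a client drives (A requests, C releases/acks, E grant-acks)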
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
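// Usage sketch (illustrative names): a cache issuing an Acquire might drive channel A with
//   val (legal, acquire) = edge.AcquireBlock(sourceId, blockAddress, lgCacheBlockBytes, TLPermissions.NtoT)
//   io.mem.a.valid := wantAcquire && legal
//   io.mem.a.bits  := acquire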
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
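// C-channel responses to accesses that were forwarded to the client on channel B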
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
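// Edge helpers used on the manager (slave) side: constructors for the messages a manager drives (B probes/accesses, D grants and acks)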
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
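// Transpose a ragged Seq[Seq[T]]: walks columns, dropping exhausted rows, until every inner Seq is empty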
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
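// Usage sketch (illustrative names): a slave answering a Get can respond on channel D with
//   io.mem.d.valid := respValid
//   io.mem.d.bits  := edge.AccessAck(savedA, respData)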
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLMonitor_1( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [1:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [3:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [1:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [31:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7]
wire [3:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7]
wire _source_ok_T = 1'h0; // @[Parameters.scala:54:10]
wire _source_ok_T_6 = 1'h0; // @[Parameters.scala:54:10]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [8:0] c_first_beats1_decode = 9'h0; // @[Edges.scala:220:59]
wire [8:0] c_first_beats1 = 9'h0; // @[Edges.scala:221:14]
wire [8:0] _c_first_count_T = 9'h0; // @[Edges.scala:234:27]
wire [8:0] c_first_count = 9'h0; // @[Edges.scala:234:25]
wire [8:0] _c_first_counter_T = 9'h0; // @[Edges.scala:236:21]
wire _source_ok_T_1 = 1'h1; // @[Parameters.scala:54:32]
wire _source_ok_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:54:67]
wire _source_ok_T_4 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:56:48]
wire _source_ok_WIRE_0 = 1'h1; // @[Parameters.scala:1138:31]
wire _source_ok_T_7 = 1'h1; // @[Parameters.scala:54:32]
wire _source_ok_T_8 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:54:67]
wire _source_ok_T_10 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:56:48]
wire _source_ok_WIRE_1_0 = 1'h1; // @[Parameters.scala:1138:31]
wire sink_ok = 1'h1; // @[Monitor.scala:309:31]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire [8:0] c_first_counter1 = 9'h1FF; // @[Edges.scala:230:28]
wire [9:0] _c_first_counter1_T = 10'h3FF; // @[Edges.scala:230:28]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_first_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_first_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_first_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_first_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] c_sizes_set = 32'h0; // @[Monitor.scala:741:34]
wire [31:0] _c_set_wo_ready_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_set_wo_ready_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_opcodes_set_interm_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_opcodes_set_interm_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_sizes_set_interm_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_sizes_set_interm_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_opcodes_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_opcodes_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_sizes_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_sizes_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_probe_ack_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_probe_ack_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_probe_ack_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_probe_ack_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _same_cycle_resp_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _same_cycle_resp_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _same_cycle_resp_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _same_cycle_resp_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _same_cycle_resp_WIRE_4_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _same_cycle_resp_WIRE_5_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_first_WIRE_bits_source = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_first_WIRE_1_bits_source = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_first_WIRE_2_bits_source = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_first_WIRE_3_bits_source = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_set_wo_ready_WIRE_bits_source = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_set_wo_ready_WIRE_1_bits_source = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_set_WIRE_bits_source = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_set_WIRE_1_bits_source = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_opcodes_set_interm_WIRE_bits_source = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_opcodes_set_interm_WIRE_1_bits_source = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_sizes_set_interm_WIRE_bits_source = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_sizes_set_interm_WIRE_1_bits_source = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_opcodes_set_WIRE_bits_source = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_opcodes_set_WIRE_1_bits_source = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_sizes_set_WIRE_bits_source = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_sizes_set_WIRE_1_bits_source = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_probe_ack_WIRE_bits_source = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_probe_ack_WIRE_1_bits_source = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_probe_ack_WIRE_2_bits_source = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_probe_ack_WIRE_3_bits_source = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _same_cycle_resp_WIRE_bits_source = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _same_cycle_resp_WIRE_1_bits_source = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _same_cycle_resp_WIRE_2_bits_source = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _same_cycle_resp_WIRE_3_bits_source = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _same_cycle_resp_WIRE_4_bits_source = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _same_cycle_resp_WIRE_5_bits_source = 2'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_first_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_first_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_first_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_first_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] c_set = 4'h0; // @[Monitor.scala:738:34]
wire [3:0] c_set_wo_ready = 4'h0; // @[Monitor.scala:739:34]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] _c_set_wo_ready_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_set_wo_ready_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_opcodes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_opcodes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_sizes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_opcodes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_opcodes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_sizes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_sizes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_probe_ack_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_probe_ack_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_probe_ack_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_probe_ack_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _same_cycle_resp_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _same_cycle_resp_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _same_cycle_resp_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _same_cycle_resp_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _same_cycle_resp_WIRE_4_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _same_cycle_resp_WIRE_5_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hFF; // @[Monitor.scala:612:57]
wire [15:0] _c_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hFF; // @[Monitor.scala:724:57]
wire [16:0] _a_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hFF; // @[Monitor.scala:612:57]
wire [16:0] _c_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hFF; // @[Monitor.scala:724:57]
wire [15:0] _a_size_lookup_T_3 = 16'h100; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h100; // @[Monitor.scala:612:51]
wire [15:0] _c_size_lookup_T_3 = 16'h100; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h100; // @[Monitor.scala:724:51]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [35:0] _c_sizes_set_T_1 = 36'h0; // @[Monitor.scala:768:52]
wire [4:0] c_sizes_set_interm = 5'h0; // @[Monitor.scala:755:40]
wire [4:0] _c_sizes_set_interm_T = 5'h0; // @[Monitor.scala:766:51]
wire [4:0] _c_opcodes_set_T = 5'h0; // @[Monitor.scala:767:79]
wire [4:0] _c_sizes_set_T = 5'h0; // @[Monitor.scala:768:77]
wire [34:0] _c_opcodes_set_T_1 = 35'h0; // @[Monitor.scala:767:54]
wire [4:0] _c_sizes_set_interm_T_1 = 5'h1; // @[Monitor.scala:766:59]
wire [3:0] _c_set_wo_ready_T = 4'h1; // @[OneHot.scala:58:35]
wire [3:0] _c_set_T = 4'h1; // @[OneHot.scala:58:35]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [15:0] c_opcodes_set = 16'h0; // @[Monitor.scala:740:34]
wire [11:0] _c_first_beats1_decode_T_2 = 12'h0; // @[package.scala:243:46]
wire [11:0] _c_first_beats1_decode_T_1 = 12'hFFF; // @[package.scala:243:76]
wire [26:0] _c_first_beats1_decode_T = 27'hFFF; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [3:0] _a_size_lookup_T_2 = 4'h8; // @[Monitor.scala:641:117]
wire [3:0] _d_sizes_clr_T = 4'h8; // @[Monitor.scala:681:48]
wire [3:0] _c_size_lookup_T_2 = 4'h8; // @[Monitor.scala:750:119]
wire [3:0] _d_sizes_clr_T_6 = 4'h8; // @[Monitor.scala:791:48]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [1:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [1:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [1:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [1:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [1:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [1:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [1:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [1:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [1:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [1:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [1:0] _source_ok_uncommonBits_T_1 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T; // @[Parameters.scala:52:{29,56}]
wire [26:0] _GEN = 27'hFFF << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [26:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [26:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [26:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [11:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [31:0] _is_aligned_T = {20'h0, io_in_a_bits_address_0[11:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 32'h0; // @[Edges.scala:21:{16,24}]
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 4'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [1:0] uncommonBits = _uncommonBits_T; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_1 = _uncommonBits_T_1; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_2 = _uncommonBits_T_2; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_3 = _uncommonBits_T_3; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_4 = _uncommonBits_T_4; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_5 = _uncommonBits_T_5; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_6 = _uncommonBits_T_6; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_7 = _uncommonBits_T_7; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_8 = _uncommonBits_T_8; // @[Parameters.scala:52:{29,56}]
wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1; // @[Parameters.scala:52:{29,56}]
wire _T_1257 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_1257; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_1257; // @[Decoupled.scala:51:35]
wire [11:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [8:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [8:0] a_first_counter; // @[Edges.scala:229:27]
wire [9:0] _a_first_counter1_T = {1'h0, a_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] a_first_counter1 = _a_first_counter1_T[8:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [8:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [3:0] size; // @[Monitor.scala:389:22]
reg [1:0] source; // @[Monitor.scala:390:22]
reg [31:0] address; // @[Monitor.scala:391:22]
wire _T_1330 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_1330; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_1330; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_1330; // @[Decoupled.scala:51:35]
wire [26:0] _GEN_0 = 27'hFFF << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [11:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [8:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T = {1'h0, d_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1 = _d_first_counter1_T[8:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [3:0] size_1; // @[Monitor.scala:540:22]
reg [1:0] source_1; // @[Monitor.scala:541:22]
reg [2:0] sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
reg [3:0] inflight; // @[Monitor.scala:614:27]
reg [15:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [31:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [11:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [8:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [8:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [8:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [9:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] a_first_counter1_1 = _a_first_counter1_T_1[8:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [8:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [8:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [11:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46]
wire [8:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1_1 = _d_first_counter1_T_1[8:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] a_set; // @[Monitor.scala:626:34]
wire [3:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [15:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [31:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [4:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [4:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [4:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [4:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [4:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [15:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [15:0] _a_opcode_lookup_T_6 = _a_opcode_lookup_T_1 & 16'hF; // @[Monitor.scala:637:{44,97}]
wire [15:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[15:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [7:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [4:0] _GEN_2 = {io_in_d_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :641:65]
wire [4:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_2; // @[Monitor.scala:641:65]
wire [4:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_2; // @[Monitor.scala:641:65, :681:99]
wire [4:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_2; // @[Monitor.scala:641:65, :750:67]
wire [4:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_2; // @[Monitor.scala:641:65, :791:99]
wire [31:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [31:0] _a_size_lookup_T_6 = {24'h0, _a_size_lookup_T_1[7:0]}; // @[Monitor.scala:641:{40,91}]
wire [31:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[31:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[7:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [4:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [3:0] _GEN_3 = {2'h0, io_in_a_bits_source_0}; // @[OneHot.scala:58:35]
wire [3:0] _GEN_4 = 4'h1 << _GEN_3; // @[OneHot.scala:58:35]
wire [3:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_4; // @[OneHot.scala:58:35]
wire [3:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_4; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T : 4'h0; // @[OneHot.scala:58:35]
wire _T_1183 = _T_1257 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_1183 ? _a_set_T : 4'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_1183 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [4:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [4:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[4:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_1183 ? _a_sizes_set_interm_T_1 : 5'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [4:0] _a_opcodes_set_T = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [34:0] _a_opcodes_set_T_1 = {31'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_1183 ? _a_opcodes_set_T_1[15:0] : 16'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [4:0] _a_sizes_set_T = {io_in_a_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :660:77]
wire [35:0] _a_sizes_set_T_1 = {31'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_1183 ? _a_sizes_set_T_1[31:0] : 32'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [3:0] d_clr; // @[Monitor.scala:664:34]
wire [3:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [15:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [31:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_5 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_5; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_5; // @[Monitor.scala:673:46, :783:46]
wire _T_1229 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [3:0] _GEN_6 = {2'h0, io_in_d_bits_source_0}; // @[OneHot.scala:58:35]
wire [3:0] _GEN_7 = 4'h1 << _GEN_6; // @[OneHot.scala:58:35]
wire [3:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_7; // @[OneHot.scala:58:35]
wire [3:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_7; // @[OneHot.scala:58:35]
wire [3:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_7; // @[OneHot.scala:58:35]
wire [3:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_7; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_1229 & ~d_release_ack ? _d_clr_wo_ready_T : 4'h0; // @[OneHot.scala:58:35]
wire _T_1198 = _T_1330 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_1198 ? _d_clr_T : 4'h0; // @[OneHot.scala:58:35]
wire [46:0] _d_opcodes_clr_T_5 = 47'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_1198 ? _d_opcodes_clr_T_5[15:0] : 16'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [46:0] _d_sizes_clr_T_5 = 47'hFF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_1198 ? _d_sizes_clr_T_5[31:0] : 32'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [3:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [3:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [3:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [15:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [15:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [15:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [31:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [31:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [31:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [3:0] inflight_1; // @[Monitor.scala:726:35]
wire [3:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [15:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [15:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [31:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [31:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [11:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[11:3]; // @[package.scala:243:46]
wire [8:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1_2 = _d_first_counter1_T_2[8:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [7:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [15:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [15:0] _c_opcode_lookup_T_6 = _c_opcode_lookup_T_1 & 16'hF; // @[Monitor.scala:749:{44,97}]
wire [15:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[15:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [31:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [31:0] _c_size_lookup_T_6 = {24'h0, _c_size_lookup_T_1[7:0]}; // @[Monitor.scala:750:{42,93}]
wire [31:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[31:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[7:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [3:0] d_clr_1; // @[Monitor.scala:774:34]
wire [3:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [15:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [31:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_1301 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_1301 & d_release_ack_1 ? _d_clr_wo_ready_T_1 : 4'h0; // @[OneHot.scala:58:35]
wire _T_1283 = _T_1330 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_1283 ? _d_clr_T_1 : 4'h0; // @[OneHot.scala:58:35]
wire [46:0] _d_opcodes_clr_T_11 = 47'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_1283 ? _d_opcodes_clr_T_11[15:0] : 16'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [46:0] _d_sizes_clr_T_11 = 47'hFF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_1283 ? _d_sizes_clr_T_11[31:0] : 32'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 2'h0; // @[Monitor.scala:36:7, :795:113]
wire [3:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [3:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [15:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [15:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [31:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [31:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
  reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
Generate the Verilog code corresponding to the following Chisel files.
File Buffer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.BufferParams
class TLBufferNode (
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit valName: ValName) extends TLAdapterNode(
clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) },
managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) }
) {
override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}"
override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none)
}
class TLBuffer(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters) extends LazyModule
{
def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace)
def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde)
def this()(implicit p: Parameters) = this(BufferParams.default)
val node = new TLBufferNode(a, b, c, d, e)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def headBundle = node.out.head._2.bundle
override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.a <> a(in .a)
in .d <> d(out.d)
if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
in .b <> b(out.b)
out.c <> c(in .c)
out.e <> e(in .e)
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLBuffer
{
def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default)
def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde)
def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace)
def apply(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters): TLNode =
{
val buffer = LazyModule(new TLBuffer(a, b, c, d, e))
buffer.node
}
def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = {
val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) }
name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } }
buffers.map(_.node)
}
def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = {
chain(depth, name)
.reduceLeftOption(_ :*=* _)
.getOrElse(TLNameNode("no_buffer"))
}
}
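// Editor's note (illustrative usage sketch, not part of the upstream Buffer.scala): the companion
// object above is typically used to splice buffer stages into a TileLink edge inside a LazyModule,
// with each connection written sink := source. Assuming `clientNode` and `managerNode` are
// pre-existing TileLink nodes defined elsewhere:
//
//   managerNode := TLBuffer() := clientNode                                      // default buffers on every channel
//   managerNode := TLBuffer(BufferParams.flow, BufferParams.none) := clientNode  // flow on a/c/e, none on b/d
//   managerNode := TLBuffer.chainNode(2, Some("mem_buffers")) := clientNode      // two buffers in series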
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
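// Editor's note (illustrative sketch, not part of the upstream Nodes.scala): the node types above are
// the usual endpoints of a TileLink diplomacy graph. Assuming `clientPortParams: TLMasterPortParameters`
// and `managerPortParams: TLSlavePortParameters` exist, a typical wiring inside a LazyModule looks like:
//
//   val client  = TLClientNode(Seq(clientPortParams))
//   val manager = TLManagerNode(Seq(managerPortParams))
//   val ident   = TLIdentityNode()
//   manager := ident := client   // each := has the sink on the left and the source on the right
//
// Adapter and nexus nodes (TLAdapterNode, TLNexusNode) are spliced into such chains the same way, and
// the TLImp.monitor hook above is what attaches a TLMonitor to each monitored edge.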
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** instantiate this [[LazyModule]], return the [[AutoBundle]] and any unconnected [[Dangle]]s from this module and
    * its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
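// Editor's note (illustrative, not part of the upstream LazyModuleImp.scala): the usual pattern is for a
// LazyModule to declare its diplomatic nodes eagerly and its hardware inside a LazyModuleImp, so that
// elaboration only happens after the diplomacy graph has been resolved:
//
//   class Example(implicit p: Parameters) extends LazyModule {
//     val node = ...                              // declare diplomatic nodes first
//     lazy val module = new LazyModuleImp(this) { // hardware is elaborated lazily,
//       // node.in / node.out may be used here    // once the graph has been resolved
//     }
//   }
//
// TLBuffer in Buffer.scala above is a concrete instance of exactly this pattern. LazyRawModuleImp is the
// RawModule variant; its childClock/childReset must be driven as described in the comments above.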
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
  *   flip or not in [[AutoBundle.makeElements]]. If true, this corresponds to `danglesOut`; if false, it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
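// Editor's illustration (hypothetical values, not from the upstream file): a Dangle ties one hardware
// bundle to the pair of node ports it spans, e.g.
//
//   Dangle(source = HalfEdge(serial = 12, index = 0),
//          sink   = HalfEdge(serial =  7, index = 1),
//          flipped = false, name = "out", dataOpt = None)
//
// LazyModuleImp.instantiate (in LazyModuleImp.scala above) then pairs Dangles with a matching `source`
// and either connects the two directly or exposes the unmatched ones as AutoBundle IO.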
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
  * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all node are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
  *   Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
  *   Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
  *   the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
  *   Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
  *   It should extend from [[chisel3.Data]], which represents the real hardware.
* @tparam DO
  *   Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
  *   describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
  *   Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
  *   Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
  *   interface. It should extend from [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
  *   - line `─`: the source is processed by a function and the generated result is passed to others
  *   - Arrow `→`: the target of the arrow is generated by the source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
  /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
    * connections which need to be resolved in some way to determine how many actual edges they correspond to. We also
    * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
    * edge parameters and later build up the correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
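  // Editor's illustration (a paraphrase of the inline comments above, not upstream text): from the
  // perspective of a node N, each binding operator contributes to the counters roughly as follows:
  //   foo := N   -> adds 1 to oKnown              N := foo   -> adds 1 to iKnown          (BIND_ONCE)
  //   foo :*= N  -> adds foo.iStar to oKnown      N :=* foo  -> adds foo.oStar to iKnown  (BIND_QUERY)
  //   foo :=* N  -> counted in oStars             N :*= foo  -> counted in iStars         (BIND_STAR)
  // resolveStar(iKnown, oKnown, iStars, oStars) decides how many edges each BIND_STAR binding expands
  // to, and oSum/iSum then turn those per-binding counts into the edge ranges in o/iPortMapping.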
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
    * If you need access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
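  // Illustrative usage sketch (not part of the upstream file): from outside the owning
  // LazyModule, `someNode.edges.in` exposes the negotiated inward edge parameters without
  // forcing bundle wires to be created the way `someNode.in` would. `someNode` is a
  // hypothetical foreign diplomatic node.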
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
    // TODO: unconnected forwarded diplomatic signals are driven to DontCare here as a compatibility workaround.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
    // TODO: unconnected forwarded diplomatic signals are driven to DontCare here as a compatibility workaround.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
  /** Create the [[Dangle]]s which describe the connections from this node's outputs to other nodes' inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
  /** Create the [[Dangle]]s which describe the connections into this node's inputs from other nodes' outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
    * Accessors to the result of negotiation, to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
    * Accessors to the result of negotiation, to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
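  // Illustrative usage sketch (not part of the upstream file), inside a LazyModuleImp body:
  //   val (bundle0, edge0)   = node.in(0)   // first negotiated inward port and its edge
  //   val (obundle0, oedge0) = node.out(0)  // first negotiated outward port and its edge
  // where `node` is a hypothetical diplomatic node owned by the surrounding LazyModule;
  // calling in/out any earlier trips the `instantiated` require above.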
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
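  // Illustrative sketch (not part of the upstream file): bind() is the common entry point for
  // the diplomacy connection operators (e.g. `sink := source` requests BIND_ONCE, while the
  // starred operators request BIND_STAR/BIND_QUERY/BIND_FLEX). Note that the outward side
  // records the complementary flavor (STAR <-> QUERY) so that star resolution can pair the
  // two ends of the same connection.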
  /** Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
| module TLBuffer_a29d64s10k1z3u( // @[Buffer.scala:40:9]
input clock, // @[Buffer.scala:40:9]
input reset, // @[Buffer.scala:40:9]
output auto_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [9:0] auto_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [28:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [9:0] auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [9:0] auto_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [28:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [9:0] auto_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_d_bits_data // @[LazyModuleImp.scala:107:25]
);
wire auto_in_a_valid_0 = auto_in_a_valid; // @[Buffer.scala:40:9]
wire [2:0] auto_in_a_bits_opcode_0 = auto_in_a_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] auto_in_a_bits_param_0 = auto_in_a_bits_param; // @[Buffer.scala:40:9]
wire [2:0] auto_in_a_bits_size_0 = auto_in_a_bits_size; // @[Buffer.scala:40:9]
wire [9:0] auto_in_a_bits_source_0 = auto_in_a_bits_source; // @[Buffer.scala:40:9]
wire [28:0] auto_in_a_bits_address_0 = auto_in_a_bits_address; // @[Buffer.scala:40:9]
wire [7:0] auto_in_a_bits_mask_0 = auto_in_a_bits_mask; // @[Buffer.scala:40:9]
wire [63:0] auto_in_a_bits_data_0 = auto_in_a_bits_data; // @[Buffer.scala:40:9]
wire auto_in_a_bits_corrupt_0 = auto_in_a_bits_corrupt; // @[Buffer.scala:40:9]
wire auto_in_d_ready_0 = auto_in_d_ready; // @[Buffer.scala:40:9]
wire auto_out_a_ready_0 = auto_out_a_ready; // @[Buffer.scala:40:9]
wire auto_out_d_valid_0 = auto_out_d_valid; // @[Buffer.scala:40:9]
wire [2:0] auto_out_d_bits_opcode_0 = auto_out_d_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] auto_out_d_bits_size_0 = auto_out_d_bits_size; // @[Buffer.scala:40:9]
wire [9:0] auto_out_d_bits_source_0 = auto_out_d_bits_source; // @[Buffer.scala:40:9]
wire [63:0] auto_out_d_bits_data_0 = auto_out_d_bits_data; // @[Buffer.scala:40:9]
wire auto_out_d_bits_sink = 1'h0; // @[Decoupled.scala:362:21]
wire auto_out_d_bits_denied = 1'h0; // @[Decoupled.scala:362:21]
wire auto_out_d_bits_corrupt = 1'h0; // @[Decoupled.scala:362:21]
wire nodeOut_d_bits_sink = 1'h0; // @[Decoupled.scala:362:21]
wire nodeOut_d_bits_denied = 1'h0; // @[Decoupled.scala:362:21]
wire nodeOut_d_bits_corrupt = 1'h0; // @[Decoupled.scala:362:21]
wire [1:0] auto_out_d_bits_param = 2'h0; // @[Decoupled.scala:362:21]
wire nodeIn_a_ready; // @[MixedNode.scala:551:17]
wire [1:0] nodeOut_d_bits_param = 2'h0; // @[Decoupled.scala:362:21]
wire nodeIn_a_valid = auto_in_a_valid_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_a_bits_opcode = auto_in_a_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_a_bits_param = auto_in_a_bits_param_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_a_bits_size = auto_in_a_bits_size_0; // @[Buffer.scala:40:9]
wire [9:0] nodeIn_a_bits_source = auto_in_a_bits_source_0; // @[Buffer.scala:40:9]
wire [28:0] nodeIn_a_bits_address = auto_in_a_bits_address_0; // @[Buffer.scala:40:9]
wire [7:0] nodeIn_a_bits_mask = auto_in_a_bits_mask_0; // @[Buffer.scala:40:9]
wire [63:0] nodeIn_a_bits_data = auto_in_a_bits_data_0; // @[Buffer.scala:40:9]
wire nodeIn_a_bits_corrupt = auto_in_a_bits_corrupt_0; // @[Buffer.scala:40:9]
wire nodeIn_d_ready = auto_in_d_ready_0; // @[Buffer.scala:40:9]
wire nodeIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] nodeIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [9:0] nodeIn_d_bits_source; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] nodeIn_d_bits_data; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire nodeOut_a_ready = auto_out_a_ready_0; // @[Buffer.scala:40:9]
wire nodeOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [9:0] nodeOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [28:0] nodeOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] nodeOut_a_bits_data; // @[MixedNode.scala:542:17]
wire nodeOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire nodeOut_d_ready; // @[MixedNode.scala:542:17]
wire nodeOut_d_valid = auto_out_d_valid_0; // @[Buffer.scala:40:9]
wire [2:0] nodeOut_d_bits_opcode = auto_out_d_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] nodeOut_d_bits_size = auto_out_d_bits_size_0; // @[Buffer.scala:40:9]
wire [9:0] nodeOut_d_bits_source = auto_out_d_bits_source_0; // @[Buffer.scala:40:9]
wire [63:0] nodeOut_d_bits_data = auto_out_d_bits_data_0; // @[Buffer.scala:40:9]
wire auto_in_a_ready_0; // @[Buffer.scala:40:9]
wire [2:0] auto_in_d_bits_opcode_0; // @[Buffer.scala:40:9]
wire [1:0] auto_in_d_bits_param_0; // @[Buffer.scala:40:9]
wire [2:0] auto_in_d_bits_size_0; // @[Buffer.scala:40:9]
wire [9:0] auto_in_d_bits_source_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_sink_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_denied_0; // @[Buffer.scala:40:9]
wire [63:0] auto_in_d_bits_data_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_in_d_valid_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_a_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_a_bits_param_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_a_bits_size_0; // @[Buffer.scala:40:9]
wire [9:0] auto_out_a_bits_source_0; // @[Buffer.scala:40:9]
wire [28:0] auto_out_a_bits_address_0; // @[Buffer.scala:40:9]
wire [7:0] auto_out_a_bits_mask_0; // @[Buffer.scala:40:9]
wire [63:0] auto_out_a_bits_data_0; // @[Buffer.scala:40:9]
wire auto_out_a_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_out_a_valid_0; // @[Buffer.scala:40:9]
wire auto_out_d_ready_0; // @[Buffer.scala:40:9]
assign auto_in_a_ready_0 = nodeIn_a_ready; // @[Buffer.scala:40:9]
assign auto_in_d_valid_0 = nodeIn_d_valid; // @[Buffer.scala:40:9]
assign auto_in_d_bits_opcode_0 = nodeIn_d_bits_opcode; // @[Buffer.scala:40:9]
assign auto_in_d_bits_param_0 = nodeIn_d_bits_param; // @[Buffer.scala:40:9]
assign auto_in_d_bits_size_0 = nodeIn_d_bits_size; // @[Buffer.scala:40:9]
assign auto_in_d_bits_source_0 = nodeIn_d_bits_source; // @[Buffer.scala:40:9]
assign auto_in_d_bits_sink_0 = nodeIn_d_bits_sink; // @[Buffer.scala:40:9]
assign auto_in_d_bits_denied_0 = nodeIn_d_bits_denied; // @[Buffer.scala:40:9]
assign auto_in_d_bits_data_0 = nodeIn_d_bits_data; // @[Buffer.scala:40:9]
assign auto_in_d_bits_corrupt_0 = nodeIn_d_bits_corrupt; // @[Buffer.scala:40:9]
assign auto_out_a_valid_0 = nodeOut_a_valid; // @[Buffer.scala:40:9]
assign auto_out_a_bits_opcode_0 = nodeOut_a_bits_opcode; // @[Buffer.scala:40:9]
assign auto_out_a_bits_param_0 = nodeOut_a_bits_param; // @[Buffer.scala:40:9]
assign auto_out_a_bits_size_0 = nodeOut_a_bits_size; // @[Buffer.scala:40:9]
assign auto_out_a_bits_source_0 = nodeOut_a_bits_source; // @[Buffer.scala:40:9]
assign auto_out_a_bits_address_0 = nodeOut_a_bits_address; // @[Buffer.scala:40:9]
assign auto_out_a_bits_mask_0 = nodeOut_a_bits_mask; // @[Buffer.scala:40:9]
assign auto_out_a_bits_data_0 = nodeOut_a_bits_data; // @[Buffer.scala:40:9]
assign auto_out_a_bits_corrupt_0 = nodeOut_a_bits_corrupt; // @[Buffer.scala:40:9]
assign auto_out_d_ready_0 = nodeOut_d_ready; // @[Buffer.scala:40:9]
TLMonitor_21 monitor ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (nodeIn_a_ready), // @[MixedNode.scala:551:17]
.io_in_a_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17]
.io_in_a_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_a_bits_param (nodeIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_in_a_bits_size (nodeIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_in_a_bits_source (nodeIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_in_a_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_in_a_bits_mask (nodeIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_in_a_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_in_a_bits_corrupt (nodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_in_d_ready (nodeIn_d_ready), // @[MixedNode.scala:551:17]
.io_in_d_valid (nodeIn_d_valid), // @[MixedNode.scala:551:17]
.io_in_d_bits_opcode (nodeIn_d_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_d_bits_param (nodeIn_d_bits_param), // @[MixedNode.scala:551:17]
.io_in_d_bits_size (nodeIn_d_bits_size), // @[MixedNode.scala:551:17]
.io_in_d_bits_source (nodeIn_d_bits_source), // @[MixedNode.scala:551:17]
.io_in_d_bits_sink (nodeIn_d_bits_sink), // @[MixedNode.scala:551:17]
.io_in_d_bits_denied (nodeIn_d_bits_denied), // @[MixedNode.scala:551:17]
.io_in_d_bits_data (nodeIn_d_bits_data), // @[MixedNode.scala:551:17]
.io_in_d_bits_corrupt (nodeIn_d_bits_corrupt) // @[MixedNode.scala:551:17]
); // @[Nodes.scala:27:25]
Queue2_TLBundleA_a29d64s10k1z3u nodeOut_a_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (nodeIn_a_ready),
.io_enq_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17]
.io_enq_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_enq_bits_param (nodeIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_enq_bits_size (nodeIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_enq_bits_source (nodeIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_enq_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_enq_bits_mask (nodeIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_enq_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_enq_bits_corrupt (nodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_deq_ready (nodeOut_a_ready), // @[MixedNode.scala:542:17]
.io_deq_valid (nodeOut_a_valid),
.io_deq_bits_opcode (nodeOut_a_bits_opcode),
.io_deq_bits_param (nodeOut_a_bits_param),
.io_deq_bits_size (nodeOut_a_bits_size),
.io_deq_bits_source (nodeOut_a_bits_source),
.io_deq_bits_address (nodeOut_a_bits_address),
.io_deq_bits_mask (nodeOut_a_bits_mask),
.io_deq_bits_data (nodeOut_a_bits_data),
.io_deq_bits_corrupt (nodeOut_a_bits_corrupt)
); // @[Decoupled.scala:362:21]
Queue2_TLBundleD_a29d64s10k1z3u nodeIn_d_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (nodeOut_d_ready),
.io_enq_valid (nodeOut_d_valid), // @[MixedNode.scala:542:17]
.io_enq_bits_opcode (nodeOut_d_bits_opcode), // @[MixedNode.scala:542:17]
.io_enq_bits_size (nodeOut_d_bits_size), // @[MixedNode.scala:542:17]
.io_enq_bits_source (nodeOut_d_bits_source), // @[MixedNode.scala:542:17]
.io_enq_bits_data (nodeOut_d_bits_data), // @[MixedNode.scala:542:17]
.io_deq_ready (nodeIn_d_ready), // @[MixedNode.scala:551:17]
.io_deq_valid (nodeIn_d_valid),
.io_deq_bits_opcode (nodeIn_d_bits_opcode),
.io_deq_bits_param (nodeIn_d_bits_param),
.io_deq_bits_size (nodeIn_d_bits_size),
.io_deq_bits_source (nodeIn_d_bits_source),
.io_deq_bits_sink (nodeIn_d_bits_sink),
.io_deq_bits_denied (nodeIn_d_bits_denied),
.io_deq_bits_data (nodeIn_d_bits_data),
.io_deq_bits_corrupt (nodeIn_d_bits_corrupt)
); // @[Decoupled.scala:362:21]
assign auto_in_a_ready = auto_in_a_ready_0; // @[Buffer.scala:40:9]
assign auto_in_d_valid = auto_in_d_valid_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_opcode = auto_in_d_bits_opcode_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_param = auto_in_d_bits_param_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_size = auto_in_d_bits_size_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_source = auto_in_d_bits_source_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_sink = auto_in_d_bits_sink_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_denied = auto_in_d_bits_denied_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_data = auto_in_d_bits_data_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_corrupt = auto_in_d_bits_corrupt_0; // @[Buffer.scala:40:9]
assign auto_out_a_valid = auto_out_a_valid_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_opcode = auto_out_a_bits_opcode_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_param = auto_out_a_bits_param_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_size = auto_out_a_bits_size_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_source = auto_out_a_bits_source_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_address = auto_out_a_bits_address_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_mask = auto_out_a_bits_mask_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_data = auto_out_a_bits_data_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_corrupt = auto_out_a_bits_corrupt_0; // @[Buffer.scala:40:9]
assign auto_out_d_ready = auto_out_d_ready_0; // @[Buffer.scala:40:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File DescribedSRAM.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3.{Data, SyncReadMem, Vec}
import chisel3.util.log2Ceil
object DescribedSRAM {
def apply[T <: Data](
name: String,
desc: String,
size: BigInt, // depth
data: T
): SyncReadMem[T] = {
val mem = SyncReadMem(size, data)
mem.suggestName(name)
val granWidth = data match {
case v: Vec[_] => v.head.getWidth
case d => d.getWidth
}
val uid = 0
Annotated.srams(
component = mem,
name = name,
address_width = log2Ceil(size),
data_width = data.getWidth,
depth = size,
description = desc,
write_mask_granularity = granWidth
)
mem
}
}
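// Illustrative usage sketch (not part of the upstream file), assuming `import chisel3._` at
// the call site: the generated module below wraps an SRAM created roughly like
//   val dataArray = DescribedSRAM(name = "dataArrayB0Way_0", desc = "dcache data array",
//                                 size = 256, data = UInt(64.W))
// which yields a 256 x 64-bit SyncReadMem plus an SRAM annotation (8-bit address, 64-bit
// data, 64-bit write-mask granularity). The name, description, and sizes are assumptions.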
| module dataArrayB0Way_6( // @[DescribedSRAM.scala:17:26]
input [7:0] RW0_addr,
input RW0_en,
input RW0_clk,
input RW0_wmode,
input [63:0] RW0_wdata,
output [63:0] RW0_rdata
);
dataArrayB0Way_0_ext dataArrayB0Way_0_ext ( // @[DescribedSRAM.scala:17:26]
.RW0_addr (RW0_addr),
.RW0_en (RW0_en),
.RW0_clk (RW0_clk),
.RW0_wmode (RW0_wmode),
.RW0_wdata (RW0_wdata),
.RW0_rdata (RW0_rdata)
); // @[DescribedSRAM.scala:17:26]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
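// Illustrative sketch (not part of the upstream file): a two-deep, named, reset-to-false
// chain on a Bool, e.g. to pipeline a slow status bit. `raw` is a hypothetical input that is
// already synchronous to this clock domain; the call must be made from within a Module.
object ShiftRegInitExample {
  def twoDeep(raw: Bool): Bool = ShiftRegInit(raw, n = 2, init = false.B, name = Some("status_pipe"))
}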
/** These wrap behavioral
  * shift registers into specific modules to allow
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
  * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
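// Illustrative sketch (not part of the upstream file): a shallow, narrow, safe crossing.
// With depth = 4 the Gray-coded indices below are log2Ceil(4) + 1 = 3 bits wide, and because
// narrow = true only a single mem word crosses the boundary (params.wires == 1).
object AsyncQueueParamsExample {
  val shallowNarrow = AsyncQueueParams(depth = 4, sync = 3, safe = true, narrow = true)
}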
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
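// Illustrative sketch (not part of the upstream file): successive GrayCounter outputs differ
// in exactly one bit (binary 0,1,2,3 maps to Gray 000,001,011,010), which is what makes the
// widx/ridx pointers below safe to synchronize across the clock-domain crossing. `tick` and
// `flush` are hypothetical control signals; the call must be made from within a Module.
object GrayCounterExample {
  def pointer(tick: Bool, flush: Bool): UInt = GrayCounter(4, tick, flush, "example_ptr")
}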
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
    // Impossible to write here because a dequeue can occur on the receiving side,
    // after which reset is allowed to happen, but the write side cannot know that the
    // dequeue occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
  // The register only latches when the selected value is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
  // It is possible that `bits` latches while the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
  // Sometimes it makes sense for the sink to have a different sync depth than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
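// Illustrative usage sketch (not part of the upstream file): crossing a Decoupled stream by
// pairing the two helpers, each elaborated in its own clock domain; the AsyncQueue class
// below performs exactly this pairing. `enq`, `enqClock`, `deqClock`, and the reset signals
// are assumptions about the call site:
//   val async = withClockAndReset(enqClock, enqReset) { ToAsyncBundle(enq, AsyncQueueParams(depth = 8)) }
//   val deq   = withClockAndReset(deqClock, deqReset) { FromAsyncBundle(async) }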
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
| module AsyncValidSync_45( // @[AsyncQueue.scala:58:7]
input io_in, // @[AsyncQueue.scala:59:14]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_55 io_out_sink_valid_1 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_d (io_in_0), // @[AsyncQueue.scala:58:7]
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File FPU.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tile
import chisel3._
import chisel3.util._
import chisel3.{DontCare, WireInit, withClock, withReset}
import chisel3.experimental.SourceInfo
import chisel3.experimental.dataview._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.rocket._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.util._
import freechips.rocketchip.util.property
case class FPUParams(
minFLen: Int = 32,
fLen: Int = 64,
divSqrt: Boolean = true,
sfmaLatency: Int = 3,
dfmaLatency: Int = 4,
fpmuLatency: Int = 2,
ifpuLatency: Int = 2
)
object FPConstants
{
val RM_SZ = 3
val FLAGS_SZ = 5
}
trait HasFPUCtrlSigs {
val ldst = Bool()
val wen = Bool()
val ren1 = Bool()
val ren2 = Bool()
val ren3 = Bool()
val swap12 = Bool()
val swap23 = Bool()
val typeTagIn = UInt(2.W)
val typeTagOut = UInt(2.W)
val fromint = Bool()
val toint = Bool()
val fastpipe = Bool()
val fma = Bool()
val div = Bool()
val sqrt = Bool()
val wflags = Bool()
val vec = Bool()
}
class FPUCtrlSigs extends Bundle with HasFPUCtrlSigs
class FPUDecoder(implicit p: Parameters) extends FPUModule()(p) {
val io = IO(new Bundle {
val inst = Input(Bits(32.W))
val sigs = Output(new FPUCtrlSigs())
})
private val X2 = BitPat.dontCare(2)
val default = List(X,X,X,X,X,X,X,X2,X2,X,X,X,X,X,X,X,N)
val h: Array[(BitPat, List[BitPat])] =
Array(FLH -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N),
FSH -> List(Y,N,N,Y,N,Y,X, I, H,N,Y,N,N,N,N,N,N),
FMV_H_X -> List(N,Y,N,N,N,X,X, H, I,Y,N,N,N,N,N,N,N),
FCVT_H_W -> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
FCVT_H_WU-> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
FCVT_H_L -> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
FCVT_H_LU-> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
FMV_X_H -> List(N,N,Y,N,N,N,X, I, H,N,Y,N,N,N,N,N,N),
FCLASS_H -> List(N,N,Y,N,N,N,X, H, H,N,Y,N,N,N,N,N,N),
FCVT_W_H -> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
FCVT_WU_H-> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
FCVT_L_H -> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
FCVT_LU_H-> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
FCVT_S_H -> List(N,Y,Y,N,N,N,X, H, S,N,N,Y,N,N,N,Y,N),
FCVT_H_S -> List(N,Y,Y,N,N,N,X, S, H,N,N,Y,N,N,N,Y,N),
FEQ_H -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N),
FLT_H -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N),
FLE_H -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N),
FSGNJ_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N),
FSGNJN_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N),
FSGNJX_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N),
FMIN_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,Y,N),
FMAX_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,Y,N),
FADD_H -> List(N,Y,Y,Y,N,N,Y, H, H,N,N,N,Y,N,N,Y,N),
FSUB_H -> List(N,Y,Y,Y,N,N,Y, H, H,N,N,N,Y,N,N,Y,N),
FMUL_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,N,Y,N,N,Y,N),
FMADD_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
FMSUB_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
FNMADD_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
FNMSUB_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
FDIV_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,N,N,Y,N,Y,N),
FSQRT_H -> List(N,Y,Y,N,N,N,X, H, H,N,N,N,N,N,Y,Y,N))
val f: Array[(BitPat, List[BitPat])] =
Array(FLW -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N),
FSW -> List(Y,N,N,Y,N,Y,X, I, S,N,Y,N,N,N,N,N,N),
FMV_W_X -> List(N,Y,N,N,N,X,X, S, I,Y,N,N,N,N,N,N,N),
FCVT_S_W -> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
FCVT_S_WU-> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
FCVT_S_L -> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
FCVT_S_LU-> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
FMV_X_W -> List(N,N,Y,N,N,N,X, I, S,N,Y,N,N,N,N,N,N),
FCLASS_S -> List(N,N,Y,N,N,N,X, S, S,N,Y,N,N,N,N,N,N),
FCVT_W_S -> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
FCVT_WU_S-> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
FCVT_L_S -> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
FCVT_LU_S-> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
FEQ_S -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N),
FLT_S -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N),
FLE_S -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N),
FSGNJ_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N),
FSGNJN_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N),
FSGNJX_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N),
FMIN_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,Y,N),
FMAX_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,Y,N),
FADD_S -> List(N,Y,Y,Y,N,N,Y, S, S,N,N,N,Y,N,N,Y,N),
FSUB_S -> List(N,Y,Y,Y,N,N,Y, S, S,N,N,N,Y,N,N,Y,N),
FMUL_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,N,Y,N,N,Y,N),
FMADD_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
FMSUB_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
FNMADD_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
FNMSUB_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
FDIV_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,N,N,Y,N,Y,N),
FSQRT_S -> List(N,Y,Y,N,N,N,X, S, S,N,N,N,N,N,Y,Y,N))
val d: Array[(BitPat, List[BitPat])] =
Array(FLD -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N),
FSD -> List(Y,N,N,Y,N,Y,X, I, D,N,Y,N,N,N,N,N,N),
FMV_D_X -> List(N,Y,N,N,N,X,X, D, I,Y,N,N,N,N,N,N,N),
FCVT_D_W -> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
FCVT_D_WU-> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
FCVT_D_L -> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
FCVT_D_LU-> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
FMV_X_D -> List(N,N,Y,N,N,N,X, I, D,N,Y,N,N,N,N,N,N),
FCLASS_D -> List(N,N,Y,N,N,N,X, D, D,N,Y,N,N,N,N,N,N),
FCVT_W_D -> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
FCVT_WU_D-> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
FCVT_L_D -> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
FCVT_LU_D-> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
FCVT_S_D -> List(N,Y,Y,N,N,N,X, D, S,N,N,Y,N,N,N,Y,N),
FCVT_D_S -> List(N,Y,Y,N,N,N,X, S, D,N,N,Y,N,N,N,Y,N),
FEQ_D -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N),
FLT_D -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N),
FLE_D -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N),
FSGNJ_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N),
FSGNJN_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N),
FSGNJX_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N),
FMIN_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,Y,N),
FMAX_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,Y,N),
FADD_D -> List(N,Y,Y,Y,N,N,Y, D, D,N,N,N,Y,N,N,Y,N),
FSUB_D -> List(N,Y,Y,Y,N,N,Y, D, D,N,N,N,Y,N,N,Y,N),
FMUL_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,N,Y,N,N,Y,N),
FMADD_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
FMSUB_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
FNMADD_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
FNMSUB_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
FDIV_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,N,N,Y,N,Y,N),
FSQRT_D -> List(N,Y,Y,N,N,N,X, D, D,N,N,N,N,N,Y,Y,N))
val fcvt_hd: Array[(BitPat, List[BitPat])] =
Array(FCVT_H_D -> List(N,Y,Y,N,N,N,X, D, H,N,N,Y,N,N,N,Y,N),
FCVT_D_H -> List(N,Y,Y,N,N,N,X, H, D,N,N,Y,N,N,N,Y,N))
val vfmv_f_s: Array[(BitPat, List[BitPat])] =
Array(VFMV_F_S -> List(N,Y,N,N,N,N,X,X2,X2,N,N,N,N,N,N,N,Y))
val insns = ((minFLen, fLen) match {
case (32, 32) => f
case (16, 32) => h ++ f
case (32, 64) => f ++ d
case (16, 64) => h ++ f ++ d ++ fcvt_hd
case other => throw new Exception(s"minFLen = ${minFLen} & fLen = ${fLen} is an unsupported configuration")
}) ++ (if (usingVector) vfmv_f_s else Array[(BitPat, List[BitPat])]())
val decoder = DecodeLogic(io.inst, default, insns)
val s = io.sigs
val sigs = Seq(s.ldst, s.wen, s.ren1, s.ren2, s.ren3, s.swap12,
s.swap23, s.typeTagIn, s.typeTagOut, s.fromint, s.toint,
s.fastpipe, s.fma, s.div, s.sqrt, s.wflags, s.vec)
sigs zip decoder map {case(s,d) => s := d}
}
class FPUCoreIO(implicit p: Parameters) extends CoreBundle()(p) {
val hartid = Input(UInt(hartIdLen.W))
val time = Input(UInt(xLen.W))
val inst = Input(Bits(32.W))
val fromint_data = Input(Bits(xLen.W))
val fcsr_rm = Input(Bits(FPConstants.RM_SZ.W))
val fcsr_flags = Valid(Bits(FPConstants.FLAGS_SZ.W))
val v_sew = Input(UInt(3.W))
val store_data = Output(Bits(fLen.W))
val toint_data = Output(Bits(xLen.W))
val ll_resp_val = Input(Bool())
val ll_resp_type = Input(Bits(3.W))
val ll_resp_tag = Input(UInt(5.W))
val ll_resp_data = Input(Bits(fLen.W))
val valid = Input(Bool())
val fcsr_rdy = Output(Bool())
val nack_mem = Output(Bool())
val illegal_rm = Output(Bool())
val killx = Input(Bool())
val killm = Input(Bool())
val dec = Output(new FPUCtrlSigs())
val sboard_set = Output(Bool())
val sboard_clr = Output(Bool())
val sboard_clra = Output(UInt(5.W))
val keep_clock_enabled = Input(Bool())
}
class FPUIO(implicit p: Parameters) extends FPUCoreIO ()(p) {
val cp_req = Flipped(Decoupled(new FPInput())) //cp doesn't pay attn to kill sigs
val cp_resp = Decoupled(new FPResult())
}
class FPResult(implicit p: Parameters) extends CoreBundle()(p) {
val data = Bits((fLen+1).W)
val exc = Bits(FPConstants.FLAGS_SZ.W)
}
class IntToFPInput(implicit p: Parameters) extends CoreBundle()(p) with HasFPUCtrlSigs {
val rm = Bits(FPConstants.RM_SZ.W)
val typ = Bits(2.W)
val in1 = Bits(xLen.W)
}
class FPInput(implicit p: Parameters) extends CoreBundle()(p) with HasFPUCtrlSigs {
val rm = Bits(FPConstants.RM_SZ.W)
val fmaCmd = Bits(2.W)
val typ = Bits(2.W)
val fmt = Bits(2.W)
val in1 = Bits((fLen+1).W)
val in2 = Bits((fLen+1).W)
val in3 = Bits((fLen+1).W)
}
case class FType(exp: Int, sig: Int) {
def ieeeWidth = exp + sig
def recodedWidth = ieeeWidth + 1
def ieeeQNaN = ((BigInt(1) << (ieeeWidth - 1)) - (BigInt(1) << (sig - 2))).U(ieeeWidth.W)
def qNaN = ((BigInt(7) << (exp + sig - 3)) + (BigInt(1) << (sig - 2))).U(recodedWidth.W)
def isNaN(x: UInt) = x(sig + exp - 1, sig + exp - 3).andR
def isSNaN(x: UInt) = isNaN(x) && !x(sig - 2)
def classify(x: UInt) = {
val sign = x(sig + exp)
val code = x(exp + sig - 1, exp + sig - 3)
val codeHi = code(2, 1)
val isSpecial = codeHi === 3.U
val isHighSubnormalIn = x(exp + sig - 3, sig - 1) < 2.U
val isSubnormal = code === 1.U || codeHi === 1.U && isHighSubnormalIn
val isNormal = codeHi === 1.U && !isHighSubnormalIn || codeHi === 2.U
val isZero = code === 0.U
val isInf = isSpecial && !code(0)
val isNaN = code.andR
val isSNaN = isNaN && !x(sig-2)
val isQNaN = isNaN && x(sig-2)
Cat(isQNaN, isSNaN, isInf && !sign, isNormal && !sign,
isSubnormal && !sign, isZero && !sign, isZero && sign,
isSubnormal && sign, isNormal && sign, isInf && sign)
}
// convert between formats, ignoring rounding, range, NaN
def unsafeConvert(x: UInt, to: FType) = if (this == to) x else {
val sign = x(sig + exp)
val fractIn = x(sig - 2, 0)
val expIn = x(sig + exp - 1, sig - 1)
val fractOut = fractIn << to.sig >> sig
val expOut = {
val expCode = expIn(exp, exp - 2)
val commonCase = (expIn + (1 << to.exp).U) - (1 << exp).U
Mux(expCode === 0.U || expCode >= 6.U, Cat(expCode, commonCase(to.exp - 3, 0)), commonCase(to.exp, 0))
}
Cat(sign, expOut, fractOut)
}
private def ieeeBundle = {
val expWidth = exp
class IEEEBundle extends Bundle {
val sign = Bool()
val exp = UInt(expWidth.W)
val sig = UInt((ieeeWidth-expWidth-1).W)
}
new IEEEBundle
}
def unpackIEEE(x: UInt) = x.asTypeOf(ieeeBundle)
def recode(x: UInt) = hardfloat.recFNFromFN(exp, sig, x)
def ieee(x: UInt) = hardfloat.fNFromRecFN(exp, sig, x)
}
object FType {
val H = new FType(5, 11)
val S = new FType(8, 24)
val D = new FType(11, 53)
val all = List(H, S, D)
}
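// Illustrative sketch (not part of the upstream file): the three hardfloat formats used here
// and their widths; recodedWidth is always one bit wider than the IEEE width.
//   FType.H -> exp 5,  sig 11, ieeeWidth 16, recodedWidth 17
//   FType.S -> exp 8,  sig 24, ieeeWidth 32, recodedWidth 33
//   FType.D -> exp 11, sig 53, ieeeWidth 64, recodedWidth 65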
trait HasFPUParameters {
require(fLen == 0 || FType.all.exists(_.ieeeWidth == fLen))
val minFLen: Int
val fLen: Int
def xLen: Int
val minXLen = 32
val nIntTypes = log2Ceil(xLen/minXLen) + 1
def floatTypes = FType.all.filter(t => minFLen <= t.ieeeWidth && t.ieeeWidth <= fLen)
def minType = floatTypes.head
def maxType = floatTypes.last
def prevType(t: FType) = floatTypes(typeTag(t) - 1)
def maxExpWidth = maxType.exp
def maxSigWidth = maxType.sig
def typeTag(t: FType) = floatTypes.indexOf(t)
def typeTagWbOffset = (FType.all.indexOf(minType) + 1).U
def typeTagGroup(t: FType) = (if (floatTypes.contains(t)) typeTag(t) else typeTag(maxType)).U
// typeTag
def H = typeTagGroup(FType.H)
def S = typeTagGroup(FType.S)
def D = typeTagGroup(FType.D)
def I = typeTag(maxType).U
private def isBox(x: UInt, t: FType): Bool = x(t.sig + t.exp, t.sig + t.exp - 4).andR
private def box(x: UInt, xt: FType, y: UInt, yt: FType): UInt = {
require(xt.ieeeWidth == 2 * yt.ieeeWidth)
val swizzledNaN = Cat(
x(xt.sig + xt.exp, xt.sig + xt.exp - 3),
x(xt.sig - 2, yt.recodedWidth - 1).andR,
x(xt.sig + xt.exp - 5, xt.sig),
y(yt.recodedWidth - 2),
x(xt.sig - 2, yt.recodedWidth - 1),
y(yt.recodedWidth - 1),
y(yt.recodedWidth - 3, 0))
Mux(xt.isNaN(x), swizzledNaN, x)
}
// implement NaN unboxing for FU inputs
def unbox(x: UInt, tag: UInt, exactType: Option[FType]): UInt = {
val outType = exactType.getOrElse(maxType)
def helper(x: UInt, t: FType): Seq[(Bool, UInt)] = {
val prev =
if (t == minType) {
Seq()
} else {
val prevT = prevType(t)
val unswizzled = Cat(
x(prevT.sig + prevT.exp - 1),
x(t.sig - 1),
x(prevT.sig + prevT.exp - 2, 0))
val prev = helper(unswizzled, prevT)
val isbox = isBox(x, t)
prev.map(p => (isbox && p._1, p._2))
}
prev :+ (true.B, t.unsafeConvert(x, outType))
}
val (oks, floats) = helper(x, maxType).unzip
if (exactType.isEmpty || floatTypes.size == 1) {
Mux(oks(tag), floats(tag), maxType.qNaN)
} else {
val t = exactType.get
floats(typeTag(t)) | Mux(oks(typeTag(t)), 0.U, t.qNaN)
}
}
// make sure that the redundant bits in the NaN-boxed encoding are consistent
def consistent(x: UInt): Bool = {
def helper(x: UInt, t: FType): Bool = if (typeTag(t) == 0) true.B else {
val prevT = prevType(t)
val unswizzled = Cat(
x(prevT.sig + prevT.exp - 1),
x(t.sig - 1),
x(prevT.sig + prevT.exp - 2, 0))
val prevOK = !isBox(x, t) || helper(unswizzled, prevT)
val curOK = !t.isNaN(x) || x(t.sig + t.exp - 4) === x(t.sig - 2, prevT.recodedWidth - 1).andR
prevOK && curOK
}
helper(x, maxType)
}
// generate a NaN box from an FU result
def box(x: UInt, t: FType): UInt = {
if (t == maxType) {
x
} else {
val nt = floatTypes(typeTag(t) + 1)
val bigger = box(((BigInt(1) << nt.recodedWidth)-1).U, nt, x, t)
bigger | ((BigInt(1) << maxType.recodedWidth) - (BigInt(1) << nt.recodedWidth)).U
}
}
// generate a NaN box from an FU result
def box(x: UInt, tag: UInt): UInt = {
val opts = floatTypes.map(t => box(x, t))
opts(tag)
}
// zap bits that hardfloat thinks are don't-cares, but we do care about
def sanitizeNaN(x: UInt, t: FType): UInt = {
if (typeTag(t) == 0) {
x
} else {
val maskedNaN = x & ~((BigInt(1) << (t.sig-1)) | (BigInt(1) << (t.sig+t.exp-4))).U(t.recodedWidth.W)
Mux(t.isNaN(x), maskedNaN, x)
}
}
// implement NaN boxing and recoding for FL*/fmv.*.x
def recode(x: UInt, tag: UInt): UInt = {
def helper(x: UInt, t: FType): UInt = {
if (typeTag(t) == 0) {
t.recode(x)
} else {
val prevT = prevType(t)
box(t.recode(x), t, helper(x, prevT), prevT)
}
}
// fill MSBs of subword loads to emulate a wider load of a NaN-boxed value
val boxes = floatTypes.map(t => ((BigInt(1) << maxType.ieeeWidth) - (BigInt(1) << t.ieeeWidth)).U)
helper(boxes(tag) | x, maxType)
}
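  // Illustrative sketch (not part of the upstream file): with fLen = 64, recoding a 32-bit
  // value loaded by FLW first ORs in the box 0xFFFFFFFF_00000000, so the value is treated as
  // a NaN-boxed double whose payload is the original single; unbox() above reverses this and
  // substitutes a canonical qNaN when the upper bits are not all ones.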
// implement NaN unboxing and un-recoding for FS*/fmv.x.*
def ieee(x: UInt, t: FType = maxType): UInt = {
if (typeTag(t) == 0) {
t.ieee(x)
} else {
val unrecoded = t.ieee(x)
val prevT = prevType(t)
val prevRecoded = Cat(
x(prevT.recodedWidth-2),
x(t.sig-1),
x(prevT.recodedWidth-3, 0))
val prevUnrecoded = ieee(prevRecoded, prevT)
Cat(unrecoded >> prevT.ieeeWidth, Mux(t.isNaN(x), prevUnrecoded, unrecoded(prevT.ieeeWidth-1, 0)))
}
}
}
abstract class FPUModule(implicit val p: Parameters) extends Module with HasCoreParameters with HasFPUParameters
class FPToInt(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
class Output extends Bundle {
val in = new FPInput
val lt = Bool()
val store = Bits(fLen.W)
val toint = Bits(xLen.W)
val exc = Bits(FPConstants.FLAGS_SZ.W)
}
val io = IO(new Bundle {
val in = Flipped(Valid(new FPInput))
val out = Valid(new Output)
})
val in = RegEnable(io.in.bits, io.in.valid)
val valid = RegNext(io.in.valid)
val dcmp = Module(new hardfloat.CompareRecFN(maxExpWidth, maxSigWidth))
dcmp.io.a := in.in1
dcmp.io.b := in.in2
dcmp.io.signaling := !in.rm(1)
val tag = in.typeTagOut
val toint_ieee = (floatTypes.map(t => if (t == FType.H) Fill(maxType.ieeeWidth / minXLen, ieee(in.in1)(15, 0).sextTo(minXLen))
else Fill(maxType.ieeeWidth / t.ieeeWidth, ieee(in.in1)(t.ieeeWidth - 1, 0))): Seq[UInt])(tag)
val toint = WireDefault(toint_ieee)
val intType = WireDefault(in.fmt(0))
io.out.bits.store := (floatTypes.map(t => Fill(fLen / t.ieeeWidth, ieee(in.in1)(t.ieeeWidth - 1, 0))): Seq[UInt])(tag)
io.out.bits.toint := ((0 until nIntTypes).map(i => toint((minXLen << i) - 1, 0).sextTo(xLen)): Seq[UInt])(intType)
io.out.bits.exc := 0.U
when (in.rm(0)) {
val classify_out = (floatTypes.map(t => t.classify(maxType.unsafeConvert(in.in1, t))): Seq[UInt])(tag)
toint := classify_out | (toint_ieee >> minXLen << minXLen)
intType := false.B
}
when (in.wflags) { // feq/flt/fle, fcvt
toint := (~in.rm & Cat(dcmp.io.lt, dcmp.io.eq)).orR | (toint_ieee >> minXLen << minXLen)
io.out.bits.exc := dcmp.io.exceptionFlags
intType := false.B
when (!in.ren2) { // fcvt
val cvtType = in.typ.extract(log2Ceil(nIntTypes), 1)
intType := cvtType
val conv = Module(new hardfloat.RecFNToIN(maxExpWidth, maxSigWidth, xLen))
conv.io.in := in.in1
conv.io.roundingMode := in.rm
conv.io.signedOut := ~in.typ(0)
toint := conv.io.out
io.out.bits.exc := Cat(conv.io.intExceptionFlags(2, 1).orR, 0.U(3.W), conv.io.intExceptionFlags(0))
for (i <- 0 until nIntTypes-1) {
val w = minXLen << i
when (cvtType === i.U) {
val narrow = Module(new hardfloat.RecFNToIN(maxExpWidth, maxSigWidth, w))
narrow.io.in := in.in1
narrow.io.roundingMode := in.rm
narrow.io.signedOut := ~in.typ(0)
val excSign = in.in1(maxExpWidth + maxSigWidth) && !maxType.isNaN(in.in1)
val excOut = Cat(conv.io.signedOut === excSign, Fill(w-1, !excSign))
val invalid = conv.io.intExceptionFlags(2) || narrow.io.intExceptionFlags(1)
when (invalid) { toint := Cat(conv.io.out >> w, excOut) }
io.out.bits.exc := Cat(invalid, 0.U(3.W), !invalid && conv.io.intExceptionFlags(0))
}
}
}
}
io.out.valid := valid
io.out.bits.lt := dcmp.io.lt || (dcmp.io.a.asSInt < 0.S && dcmp.io.b.asSInt >= 0.S)
io.out.bits.in := in
}
class IntToFP(val latency: Int)(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
val io = IO(new Bundle {
val in = Flipped(Valid(new IntToFPInput))
val out = Valid(new FPResult)
})
val in = Pipe(io.in)
val tag = in.bits.typeTagIn
val mux = Wire(new FPResult)
mux.exc := 0.U
mux.data := recode(in.bits.in1, tag)
val intValue = {
val res = WireDefault(in.bits.in1.asSInt)
for (i <- 0 until nIntTypes-1) {
val smallInt = in.bits.in1((minXLen << i) - 1, 0)
when (in.bits.typ.extract(log2Ceil(nIntTypes), 1) === i.U) {
res := Mux(in.bits.typ(0), smallInt.zext, smallInt.asSInt)
}
}
res.asUInt
}
when (in.bits.wflags) { // fcvt
// could be improved for RVD/RVQ with a single variable-position rounding
// unit, rather than N fixed-position ones
val i2fResults = for (t <- floatTypes) yield {
val i2f = Module(new hardfloat.INToRecFN(xLen, t.exp, t.sig))
i2f.io.signedIn := ~in.bits.typ(0)
i2f.io.in := intValue
i2f.io.roundingMode := in.bits.rm
i2f.io.detectTininess := hardfloat.consts.tininess_afterRounding
(sanitizeNaN(i2f.io.out, t), i2f.io.exceptionFlags)
}
val (data, exc) = i2fResults.unzip
val dataPadded = data.init.map(d => Cat(data.last >> d.getWidth, d)) :+ data.last
mux.data := dataPadded(tag)
mux.exc := exc(tag)
}
io.out <> Pipe(in.valid, mux, latency-1)
}
class FPToFP(val latency: Int)(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
val io = IO(new Bundle {
val in = Flipped(Valid(new FPInput))
val out = Valid(new FPResult)
val lt = Input(Bool()) // from FPToInt
})
val in = Pipe(io.in)
val signNum = Mux(in.bits.rm(1), in.bits.in1 ^ in.bits.in2, Mux(in.bits.rm(0), ~in.bits.in2, in.bits.in2))
val fsgnj = Cat(signNum(fLen), in.bits.in1(fLen-1, 0))
val fsgnjMux = Wire(new FPResult)
fsgnjMux.exc := 0.U
fsgnjMux.data := fsgnj
when (in.bits.wflags) { // fmin/fmax
val isnan1 = maxType.isNaN(in.bits.in1)
val isnan2 = maxType.isNaN(in.bits.in2)
val isInvalid = maxType.isSNaN(in.bits.in1) || maxType.isSNaN(in.bits.in2)
val isNaNOut = isnan1 && isnan2
val isLHS = isnan2 || in.bits.rm(0) =/= io.lt && !isnan1
fsgnjMux.exc := isInvalid << 4
fsgnjMux.data := Mux(isNaNOut, maxType.qNaN, Mux(isLHS, in.bits.in1, in.bits.in2))
}
val inTag = in.bits.typeTagIn
val outTag = in.bits.typeTagOut
val mux = WireDefault(fsgnjMux)
for (t <- floatTypes.init) {
when (outTag === typeTag(t).U) {
mux.data := Cat(fsgnjMux.data >> t.recodedWidth, maxType.unsafeConvert(fsgnjMux.data, t))
}
}
when (in.bits.wflags && !in.bits.ren2) { // fcvt
if (floatTypes.size > 1) {
// widening conversions simply canonicalize NaN operands
val widened = Mux(maxType.isNaN(in.bits.in1), maxType.qNaN, in.bits.in1)
fsgnjMux.data := widened
fsgnjMux.exc := maxType.isSNaN(in.bits.in1) << 4
// narrowing conversions require rounding (for RVQ, this could be
// optimized to use a single variable-position rounding unit, rather
// than two fixed-position ones)
for (outType <- floatTypes.init) when (outTag === typeTag(outType).U && ((typeTag(outType) == 0).B || outTag < inTag)) {
val narrower = Module(new hardfloat.RecFNToRecFN(maxType.exp, maxType.sig, outType.exp, outType.sig))
narrower.io.in := in.bits.in1
narrower.io.roundingMode := in.bits.rm
narrower.io.detectTininess := hardfloat.consts.tininess_afterRounding
val narrowed = sanitizeNaN(narrower.io.out, outType)
mux.data := Cat(fsgnjMux.data >> narrowed.getWidth, narrowed)
mux.exc := narrower.io.exceptionFlags
}
}
}
io.out <> Pipe(in.valid, mux, latency-1)
}
class MulAddRecFNPipe(latency: Int, expWidth: Int, sigWidth: Int) extends Module
{
override def desiredName = s"MulAddRecFNPipe_l${latency}_e${expWidth}_s${sigWidth}"
require(latency<=2)
val io = IO(new Bundle {
val validin = Input(Bool())
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
val validout = Output(Bool())
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val mulAddRecFNToRaw_preMul = Module(new hardfloat.MulAddRecFNToRaw_preMul(expWidth, sigWidth))
val mulAddRecFNToRaw_postMul = Module(new hardfloat.MulAddRecFNToRaw_postMul(expWidth, sigWidth))
mulAddRecFNToRaw_preMul.io.op := io.op
mulAddRecFNToRaw_preMul.io.a := io.a
mulAddRecFNToRaw_preMul.io.b := io.b
mulAddRecFNToRaw_preMul.io.c := io.c
val mulAddResult =
(mulAddRecFNToRaw_preMul.io.mulAddA *
mulAddRecFNToRaw_preMul.io.mulAddB) +&
mulAddRecFNToRaw_preMul.io.mulAddC
val valid_stage0 = Wire(Bool())
val roundingMode_stage0 = Wire(UInt(3.W))
val detectTininess_stage0 = Wire(UInt(1.W))
val postmul_regs = if(latency>0) 1 else 0
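  // Pipe(valid, data, n) inserts n register stages. postmul_regs places one stage
  // between the raw multiply-add result and the post-mul normalization when
  // latency > 0; round_regs (below) adds a second stage before rounding only when
  // latency == 2, covering the supported latencies of 0, 1 and 2.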
mulAddRecFNToRaw_postMul.io.fromPreMul := Pipe(io.validin, mulAddRecFNToRaw_preMul.io.toPostMul, postmul_regs).bits
mulAddRecFNToRaw_postMul.io.mulAddResult := Pipe(io.validin, mulAddResult, postmul_regs).bits
mulAddRecFNToRaw_postMul.io.roundingMode := Pipe(io.validin, io.roundingMode, postmul_regs).bits
roundingMode_stage0 := Pipe(io.validin, io.roundingMode, postmul_regs).bits
detectTininess_stage0 := Pipe(io.validin, io.detectTininess, postmul_regs).bits
valid_stage0 := Pipe(io.validin, false.B, postmul_regs).valid
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundRawFNToRecFN = Module(new hardfloat.RoundRawFNToRecFN(expWidth, sigWidth, 0))
val round_regs = if(latency==2) 1 else 0
roundRawFNToRecFN.io.invalidExc := Pipe(valid_stage0, mulAddRecFNToRaw_postMul.io.invalidExc, round_regs).bits
roundRawFNToRecFN.io.in := Pipe(valid_stage0, mulAddRecFNToRaw_postMul.io.rawOut, round_regs).bits
roundRawFNToRecFN.io.roundingMode := Pipe(valid_stage0, roundingMode_stage0, round_regs).bits
roundRawFNToRecFN.io.detectTininess := Pipe(valid_stage0, detectTininess_stage0, round_regs).bits
io.validout := Pipe(valid_stage0, false.B, round_regs).valid
roundRawFNToRecFN.io.infiniteExc := false.B
io.out := roundRawFNToRecFN.io.out
io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
}
class FPUFMAPipe(val latency: Int, val t: FType)
(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
override def desiredName = s"FPUFMAPipe_l${latency}_f${t.ieeeWidth}"
require(latency>0)
val io = IO(new Bundle {
val in = Flipped(Valid(new FPInput))
val out = Valid(new FPResult)
})
val valid = RegNext(io.in.valid)
val in = Reg(new FPInput)
when (io.in.valid) {
val one = 1.U << (t.sig + t.exp - 1)
val zero = (io.in.bits.in1 ^ io.in.bits.in2) & (1.U << (t.sig + t.exp))
val cmd_fma = io.in.bits.ren3
val cmd_addsub = io.in.bits.swap23
in := io.in.bits
when (cmd_addsub) { in.in2 := one }
when (!(cmd_fma || cmd_addsub)) { in.in3 := zero }
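    // The overrides above reduce fadd/fsub to in1 * 1.0 + in3 and fmul to
    // in1 * in2 + 0.0, where the injected zero's sign is sign(in1) ^ sign(in2)
    // so that an exact-zero product rounds to the correctly signed zero.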
}
val fma = Module(new MulAddRecFNPipe((latency-1) min 2, t.exp, t.sig))
fma.io.validin := valid
fma.io.op := in.fmaCmd
fma.io.roundingMode := in.rm
fma.io.detectTininess := hardfloat.consts.tininess_afterRounding
fma.io.a := in.in1
fma.io.b := in.in2
fma.io.c := in.in3
val res = Wire(new FPResult)
res.data := sanitizeNaN(fma.io.out, t)
res.exc := fma.io.exceptionFlags
io.out := Pipe(fma.io.validout, res, (latency-3) max 0)
}
class FPU(cfg: FPUParams)(implicit p: Parameters) extends FPUModule()(p) {
val io = IO(new FPUIO)
val (useClockGating, useDebugROB) = coreParams match {
case r: RocketCoreParams =>
val sz = if (r.debugROB.isDefined) r.debugROB.get.size else 1
(r.clockGate, sz < 1)
case _ => (false, false)
}
val clock_en_reg = Reg(Bool())
val clock_en = clock_en_reg || io.cp_req.valid
val gated_clock =
if (!useClockGating) clock
else ClockGate(clock, clock_en, "fpu_clock_gate")
val fp_decoder = Module(new FPUDecoder)
fp_decoder.io.inst := io.inst
val id_ctrl = WireInit(fp_decoder.io.sigs)
coreParams match { case r: RocketCoreParams => r.vector.map(v => {
val v_decode = v.decoder(p) // Only need to get ren1
v_decode.io.inst := io.inst
v_decode.io.vconfig := DontCare // core deals with this
when (v_decode.io.legal && v_decode.io.read_frs1) {
id_ctrl.ren1 := true.B
id_ctrl.swap12 := false.B
id_ctrl.toint := true.B
id_ctrl.typeTagIn := I
id_ctrl.typeTagOut := Mux(io.v_sew === 3.U, D, S)
}
when (v_decode.io.write_frd) { id_ctrl.wen := true.B }
})}
val ex_reg_valid = RegNext(io.valid, false.B)
val ex_reg_inst = RegEnable(io.inst, io.valid)
val ex_reg_ctrl = RegEnable(id_ctrl, io.valid)
val ex_ra = List.fill(3)(Reg(UInt()))
// load/vector response
val load_wb = RegNext(io.ll_resp_val)
val load_wb_typeTag = RegEnable(io.ll_resp_type(1,0) - typeTagWbOffset, io.ll_resp_val)
val load_wb_data = RegEnable(io.ll_resp_data, io.ll_resp_val)
val load_wb_tag = RegEnable(io.ll_resp_tag, io.ll_resp_val)
class FPUImpl { // entering gated-clock domain
val req_valid = ex_reg_valid || io.cp_req.valid
val ex_cp_valid = io.cp_req.fire
val mem_cp_valid = RegNext(ex_cp_valid, false.B)
val wb_cp_valid = RegNext(mem_cp_valid, false.B)
val mem_reg_valid = RegInit(false.B)
val killm = (io.killm || io.nack_mem) && !mem_cp_valid
// Kill X-stage instruction if M-stage is killed. This prevents it from
// speculatively being sent to the div-sqrt unit, which can cause priority
// inversion for two back-to-back divides, the first of which is killed.
val killx = io.killx || mem_reg_valid && killm
mem_reg_valid := ex_reg_valid && !killx || ex_cp_valid
val mem_reg_inst = RegEnable(ex_reg_inst, ex_reg_valid)
val wb_reg_valid = RegNext(mem_reg_valid && (!killm || mem_cp_valid), false.B)
val cp_ctrl = Wire(new FPUCtrlSigs)
cp_ctrl :<>= io.cp_req.bits.viewAsSupertype(new FPUCtrlSigs)
io.cp_resp.valid := false.B
io.cp_resp.bits.data := 0.U
io.cp_resp.bits.exc := DontCare
val ex_ctrl = Mux(ex_cp_valid, cp_ctrl, ex_reg_ctrl)
val mem_ctrl = RegEnable(ex_ctrl, req_valid)
val wb_ctrl = RegEnable(mem_ctrl, mem_reg_valid)
// CoreMonitorBundle to monitor fp register file writes
val frfWriteBundle = Seq.fill(2)(WireInit(new CoreMonitorBundle(xLen, fLen), DontCare))
frfWriteBundle.foreach { i =>
i.clock := clock
i.reset := reset
i.hartid := io.hartid
i.timer := io.time(31,0)
i.valid := false.B
i.wrenx := false.B
i.wrenf := false.B
i.excpt := false.B
}
// regfile
val regfile = Mem(32, Bits((fLen+1).W))
when (load_wb) {
val wdata = recode(load_wb_data, load_wb_typeTag)
regfile(load_wb_tag) := wdata
assert(consistent(wdata))
if (enableCommitLog)
printf("f%d p%d 0x%x\n", load_wb_tag, load_wb_tag + 32.U, ieee(wdata))
if (useDebugROB)
DebugROB.pushWb(clock, reset, io.hartid, load_wb, load_wb_tag + 32.U, ieee(wdata))
frfWriteBundle(0).wrdst := load_wb_tag
frfWriteBundle(0).wrenf := true.B
frfWriteBundle(0).wrdata := ieee(wdata)
}
val ex_rs = ex_ra.map(a => regfile(a))
when (io.valid) {
when (id_ctrl.ren1) {
when (!id_ctrl.swap12) { ex_ra(0) := io.inst(19,15) }
when (id_ctrl.swap12) { ex_ra(1) := io.inst(19,15) }
}
when (id_ctrl.ren2) {
when (id_ctrl.swap12) { ex_ra(0) := io.inst(24,20) }
when (id_ctrl.swap23) { ex_ra(2) := io.inst(24,20) }
when (!id_ctrl.swap12 && !id_ctrl.swap23) { ex_ra(1) := io.inst(24,20) }
}
when (id_ctrl.ren3) { ex_ra(2) := io.inst(31,27) }
}
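    // An instruction rm field of 0b111 (DYN) defers to the dynamic rounding mode in fcsr.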
val ex_rm = Mux(ex_reg_inst(14,12) === 7.U, io.fcsr_rm, ex_reg_inst(14,12))
def fuInput(minT: Option[FType]): FPInput = {
val req = Wire(new FPInput)
val tag = ex_ctrl.typeTagIn
req.viewAsSupertype(new Bundle with HasFPUCtrlSigs) :#= ex_ctrl.viewAsSupertype(new Bundle with HasFPUCtrlSigs)
req.rm := ex_rm
req.in1 := unbox(ex_rs(0), tag, minT)
req.in2 := unbox(ex_rs(1), tag, minT)
req.in3 := unbox(ex_rs(2), tag, minT)
req.typ := ex_reg_inst(21,20)
req.fmt := ex_reg_inst(26,25)
req.fmaCmd := ex_reg_inst(3,2) | (!ex_ctrl.ren3 && ex_reg_inst(27))
when (ex_cp_valid) {
req := io.cp_req.bits
when (io.cp_req.bits.swap12) {
req.in1 := io.cp_req.bits.in2
req.in2 := io.cp_req.bits.in1
}
when (io.cp_req.bits.swap23) {
req.in2 := io.cp_req.bits.in3
req.in3 := io.cp_req.bits.in2
}
}
req
}
val sfma = Module(new FPUFMAPipe(cfg.sfmaLatency, FType.S))
sfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === S
sfma.io.in.bits := fuInput(Some(sfma.t))
val fpiu = Module(new FPToInt)
fpiu.io.in.valid := req_valid && (ex_ctrl.toint || ex_ctrl.div || ex_ctrl.sqrt || (ex_ctrl.fastpipe && ex_ctrl.wflags))
fpiu.io.in.bits := fuInput(None)
io.store_data := fpiu.io.out.bits.store
io.toint_data := fpiu.io.out.bits.toint
when(fpiu.io.out.valid && mem_cp_valid && mem_ctrl.toint){
io.cp_resp.bits.data := fpiu.io.out.bits.toint
io.cp_resp.valid := true.B
}
val ifpu = Module(new IntToFP(cfg.ifpuLatency))
ifpu.io.in.valid := req_valid && ex_ctrl.fromint
ifpu.io.in.bits := fpiu.io.in.bits
ifpu.io.in.bits.in1 := Mux(ex_cp_valid, io.cp_req.bits.in1, io.fromint_data)
val fpmu = Module(new FPToFP(cfg.fpmuLatency))
fpmu.io.in.valid := req_valid && ex_ctrl.fastpipe
fpmu.io.in.bits := fpiu.io.in.bits
fpmu.io.lt := fpiu.io.out.bits.lt
val divSqrt_wen = WireDefault(false.B)
val divSqrt_inFlight = WireDefault(false.B)
val divSqrt_waddr = Reg(UInt(5.W))
val divSqrt_cp = Reg(Bool())
val divSqrt_typeTag = Wire(UInt(log2Up(floatTypes.size).W))
val divSqrt_wdata = Wire(UInt((fLen+1).W))
val divSqrt_flags = Wire(UInt(FPConstants.FLAGS_SZ.W))
divSqrt_typeTag := DontCare
divSqrt_wdata := DontCare
divSqrt_flags := DontCare
// writeback arbitration
case class Pipe(p: Module, lat: Int, cond: (FPUCtrlSigs) => Bool, res: FPResult)
val pipes = List(
Pipe(fpmu, fpmu.latency, (c: FPUCtrlSigs) => c.fastpipe, fpmu.io.out.bits),
Pipe(ifpu, ifpu.latency, (c: FPUCtrlSigs) => c.fromint, ifpu.io.out.bits),
Pipe(sfma, sfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === S, sfma.io.out.bits)) ++
(fLen > 32).option({
val dfma = Module(new FPUFMAPipe(cfg.dfmaLatency, FType.D))
dfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === D
dfma.io.in.bits := fuInput(Some(dfma.t))
Pipe(dfma, dfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === D, dfma.io.out.bits)
}) ++
(minFLen == 16).option({
val hfma = Module(new FPUFMAPipe(cfg.sfmaLatency, FType.H))
hfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === H
hfma.io.in.bits := fuInput(Some(hfma.t))
Pipe(hfma, hfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === H, hfma.io.out.bits)
})
def latencyMask(c: FPUCtrlSigs, offset: Int) = {
require(pipes.forall(_.lat >= offset))
pipes.map(p => Mux(p.cond(c), (1 << p.lat-offset).U, 0.U)).reduce(_|_)
}
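    // latencyMask returns a per-pipe one-hot writeback slot: a unit of latency L
    // contributes bit (L - offset). E.g. querying from the MEM stage (offset = 2)
    // for a 4-cycle FMA pipe sets bit 2, so its writeback lands two cycles later.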
def pipeid(c: FPUCtrlSigs) = pipes.zipWithIndex.map(p => Mux(p._1.cond(c), p._2.U, 0.U)).reduce(_|_)
val maxLatency = pipes.map(_.lat).max
val memLatencyMask = latencyMask(mem_ctrl, 2)
class WBInfo extends Bundle {
val rd = UInt(5.W)
val typeTag = UInt(log2Up(floatTypes.size).W)
val cp = Bool()
val pipeid = UInt(log2Ceil(pipes.size).W)
}
val wen = RegInit(0.U((maxLatency-1).W))
val wbInfo = Reg(Vec(maxLatency-1, new WBInfo))
val mem_wen = mem_reg_valid && (mem_ctrl.fma || mem_ctrl.fastpipe || mem_ctrl.fromint)
val write_port_busy = RegEnable(mem_wen && (memLatencyMask & latencyMask(ex_ctrl, 1)).orR || (wen & latencyMask(ex_ctrl, 0)).orR, req_valid)
ccover(mem_reg_valid && write_port_busy, "WB_STRUCTURAL", "structural hazard on writeback")
for (i <- 0 until maxLatency-2) {
when (wen(i+1)) { wbInfo(i) := wbInfo(i+1) }
}
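    // 'wen' is a shift register of pending register-file writebacks: it shifts right
    // every cycle, and an un-killed instruction in MEM ORs in its latencyMask so that
    // bit 0 is set on exactly the cycle its result reaches the register file; the
    // 'wbInfo' entries above shift in step to carry the destination and type tag.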
wen := wen >> 1
when (mem_wen) {
when (!killm) {
wen := wen >> 1 | memLatencyMask
}
for (i <- 0 until maxLatency-1) {
when (!write_port_busy && memLatencyMask(i)) {
wbInfo(i).cp := mem_cp_valid
wbInfo(i).typeTag := mem_ctrl.typeTagOut
wbInfo(i).pipeid := pipeid(mem_ctrl)
wbInfo(i).rd := mem_reg_inst(11,7)
}
}
}
val waddr = Mux(divSqrt_wen, divSqrt_waddr, wbInfo(0).rd)
val wb_cp = Mux(divSqrt_wen, divSqrt_cp, wbInfo(0).cp)
val wtypeTag = Mux(divSqrt_wen, divSqrt_typeTag, wbInfo(0).typeTag)
val wdata = box(Mux(divSqrt_wen, divSqrt_wdata, (pipes.map(_.res.data): Seq[UInt])(wbInfo(0).pipeid)), wtypeTag)
val wexc = (pipes.map(_.res.exc): Seq[UInt])(wbInfo(0).pipeid)
when ((!wbInfo(0).cp && wen(0)) || divSqrt_wen) {
assert(consistent(wdata))
regfile(waddr) := wdata
if (enableCommitLog) {
printf("f%d p%d 0x%x\n", waddr, waddr + 32.U, ieee(wdata))
}
frfWriteBundle(1).wrdst := waddr
frfWriteBundle(1).wrenf := true.B
frfWriteBundle(1).wrdata := ieee(wdata)
}
if (useDebugROB) {
DebugROB.pushWb(clock, reset, io.hartid, (!wbInfo(0).cp && wen(0)) || divSqrt_wen, waddr + 32.U, ieee(wdata))
}
when (wb_cp && (wen(0) || divSqrt_wen)) {
io.cp_resp.bits.data := wdata
io.cp_resp.valid := true.B
}
assert(!io.cp_req.valid || pipes.forall(_.lat == pipes.head.lat).B,
s"FPU only supports coprocessor if FMA pipes have uniform latency ${pipes.map(_.lat)}")
// Avoid structural hazards and nacking of external requests
// toint responds in the MEM stage, so an incoming toint can induce a structural hazard against inflight FMAs
io.cp_req.ready := !ex_reg_valid && !(cp_ctrl.toint && wen =/= 0.U) && !divSqrt_inFlight
val wb_toint_valid = wb_reg_valid && wb_ctrl.toint
val wb_toint_exc = RegEnable(fpiu.io.out.bits.exc, mem_ctrl.toint)
io.fcsr_flags.valid := wb_toint_valid || divSqrt_wen || wen(0)
io.fcsr_flags.bits :=
Mux(wb_toint_valid, wb_toint_exc, 0.U) |
Mux(divSqrt_wen, divSqrt_flags, 0.U) |
Mux(wen(0), wexc, 0.U)
val divSqrt_write_port_busy = (mem_ctrl.div || mem_ctrl.sqrt) && wen.orR
io.fcsr_rdy := !(ex_reg_valid && ex_ctrl.wflags || mem_reg_valid && mem_ctrl.wflags || wb_reg_valid && wb_ctrl.toint || wen.orR || divSqrt_inFlight)
io.nack_mem := (write_port_busy || divSqrt_write_port_busy || divSqrt_inFlight) && !mem_cp_valid
io.dec <> id_ctrl
def useScoreboard(f: ((Pipe, Int)) => Bool) = pipes.zipWithIndex.filter(_._1.lat > 3).map(x => f(x)).fold(false.B)(_||_)
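    // Only pipes whose latency exceeds the in-order writeback window (latency > 3),
    // plus div/sqrt and vector operations, are tracked via the core's FP scoreboard.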
io.sboard_set := wb_reg_valid && !wb_cp_valid && RegNext(useScoreboard(_._1.cond(mem_ctrl)) || mem_ctrl.div || mem_ctrl.sqrt || mem_ctrl.vec)
io.sboard_clr := !wb_cp_valid && (divSqrt_wen || (wen(0) && useScoreboard(x => wbInfo(0).pipeid === x._2.U)))
io.sboard_clra := waddr
ccover(io.sboard_clr && load_wb, "DUAL_WRITEBACK", "load and FMA writeback on same cycle")
    // reserved static rounding modes (rm=5, rm=6) are illegal, as is DYN (rm=7) when
    // fcsr.rm itself holds a reserved value
io.illegal_rm := io.inst(14,12).isOneOf(5.U, 6.U) || io.inst(14,12) === 7.U && io.fcsr_rm >= 5.U
if (cfg.divSqrt) {
val divSqrt_inValid = mem_reg_valid && (mem_ctrl.div || mem_ctrl.sqrt) && !divSqrt_inFlight
val divSqrt_killed = RegNext(divSqrt_inValid && killm, true.B)
when (divSqrt_inValid) {
divSqrt_waddr := mem_reg_inst(11,7)
divSqrt_cp := mem_cp_valid
}
ccover(divSqrt_inFlight && divSqrt_killed, "DIV_KILLED", "divide killed after issued to divider")
ccover(divSqrt_inFlight && mem_reg_valid && (mem_ctrl.div || mem_ctrl.sqrt), "DIV_BUSY", "divider structural hazard")
ccover(mem_reg_valid && divSqrt_write_port_busy, "DIV_WB_STRUCTURAL", "structural hazard on division writeback")
for (t <- floatTypes) {
val tag = mem_ctrl.typeTagOut
val divSqrt = withReset(divSqrt_killed) { Module(new hardfloat.DivSqrtRecFN_small(t.exp, t.sig, 0)) }
divSqrt.io.inValid := divSqrt_inValid && tag === typeTag(t).U
divSqrt.io.sqrtOp := mem_ctrl.sqrt
divSqrt.io.a := maxType.unsafeConvert(fpiu.io.out.bits.in.in1, t)
divSqrt.io.b := maxType.unsafeConvert(fpiu.io.out.bits.in.in2, t)
divSqrt.io.roundingMode := fpiu.io.out.bits.in.rm
divSqrt.io.detectTininess := hardfloat.consts.tininess_afterRounding
when (!divSqrt.io.inReady) { divSqrt_inFlight := true.B } // only 1 in flight
when (divSqrt.io.outValid_div || divSqrt.io.outValid_sqrt) {
divSqrt_wen := !divSqrt_killed
divSqrt_wdata := sanitizeNaN(divSqrt.io.out, t)
divSqrt_flags := divSqrt.io.exceptionFlags
divSqrt_typeTag := typeTag(t).U
}
}
when (divSqrt_killed) { divSqrt_inFlight := false.B }
} else {
when (id_ctrl.div || id_ctrl.sqrt) { io.illegal_rm := true.B }
}
// gate the clock
clock_en_reg := !useClockGating.B ||
io.keep_clock_enabled || // chicken bit
io.valid || // ID stage
req_valid || // EX stage
mem_reg_valid || mem_cp_valid || // MEM stage
wb_reg_valid || wb_cp_valid || // WB stage
wen.orR || divSqrt_inFlight || // post-WB stage
io.ll_resp_val // load writeback
} // leaving gated-clock domain
val fpuImpl = withClock (gated_clock) { new FPUImpl }
def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
property.cover(cond, s"FPU_$label", "Core;;" + desc)
}
| module FPUFMAPipe_l4_f64( // @[FPU.scala:697:7]
input clock, // @[FPU.scala:697:7]
input reset, // @[FPU.scala:697:7]
input io_in_valid, // @[FPU.scala:702:14]
input io_in_bits_ren3, // @[FPU.scala:702:14]
input io_in_bits_swap23, // @[FPU.scala:702:14]
input [2:0] io_in_bits_rm, // @[FPU.scala:702:14]
input [1:0] io_in_bits_fmaCmd, // @[FPU.scala:702:14]
input [64:0] io_in_bits_in1, // @[FPU.scala:702:14]
input [64:0] io_in_bits_in2, // @[FPU.scala:702:14]
input [64:0] io_in_bits_in3, // @[FPU.scala:702:14]
output [64:0] io_out_bits_data, // @[FPU.scala:702:14]
output [4:0] io_out_bits_exc // @[FPU.scala:702:14]
);
wire [64:0] _fma_io_out; // @[FPU.scala:719:19]
wire [4:0] _fma_io_exceptionFlags; // @[FPU.scala:719:19]
wire _fma_io_validout; // @[FPU.scala:719:19]
reg valid; // @[FPU.scala:707:22]
reg [2:0] in_rm; // @[FPU.scala:708:15]
reg [1:0] in_fmaCmd; // @[FPU.scala:708:15]
reg [64:0] in_in1; // @[FPU.scala:708:15]
reg [64:0] in_in2; // @[FPU.scala:708:15]
reg [64:0] in_in3; // @[FPU.scala:708:15]
reg [64:0] io_out_pipe_b_data; // @[Valid.scala:142:26]
reg [4:0] io_out_pipe_b_exc; // @[Valid.scala:142:26]
always @(posedge clock) begin // @[FPU.scala:697:7]
valid <= io_in_valid; // @[FPU.scala:707:22]
if (io_in_valid) begin // @[FPU.scala:702:14]
in_rm <= io_in_bits_rm; // @[FPU.scala:708:15]
in_fmaCmd <= io_in_bits_fmaCmd; // @[FPU.scala:708:15]
in_in1 <= io_in_bits_in1; // @[FPU.scala:708:15]
in_in2 <= io_in_bits_swap23 ? 65'h8000000000000000 : io_in_bits_in2; // @[FPU.scala:708:15, :714:8, :715:{23,32}]
in_in3 <= io_in_bits_ren3 | io_in_bits_swap23 ? io_in_bits_in3 : (io_in_bits_in1 ^ io_in_bits_in2) & 65'h10000000000000000; // @[FPU.scala:708:15, :711:{32,50}, :714:8, :716:{21,37,46}]
end
if (_fma_io_validout) begin // @[FPU.scala:719:19]
io_out_pipe_b_data <= ({65{_fma_io_out[63:61] != 3'h7}} | 65'h1EFEFFFFFFFFFFFFF) & _fma_io_out; // @[Valid.scala:142:26]
io_out_pipe_b_exc <= _fma_io_exceptionFlags; // @[Valid.scala:142:26]
end
  end // always @(posedge)
  MulAddRecFNPipe_l2_e11_s53 fma ( // @[FPU.scala:719:19]
    .clock             (clock),
    .reset             (reset),
    .io_validin        (valid),
    .io_op             (in_fmaCmd),
    .io_a              (in_in1),
    .io_b              (in_in2),
    .io_c              (in_in3),
    .io_roundingMode   (in_rm),
    .io_out            (_fma_io_out),
    .io_exceptionFlags (_fma_io_exceptionFlags),
    .io_validout       (_fma_io_validout)
  ); // @[FPU.scala:719:19]
  assign io_out_bits_data = io_out_pipe_b_data; // @[Valid.scala:142:26]
  assign io_out_bits_exc = io_out_pipe_b_exc; // @[Valid.scala:142:26]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File UnsafeAXI4ToTL.scala:
package ara
import chisel3._
import chisel3.util._
import freechips.rocketchip.amba._
import freechips.rocketchip.amba.axi4._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
class ReorderData(val dataWidth: Int, val respWidth: Int, val userFields: Seq[BundleFieldBase]) extends Bundle {
val data = UInt(dataWidth.W)
val resp = UInt(respWidth.W)
val last = Bool()
val user = BundleMap(userFields)
}
/** Parameters for [[BaseReservableListBuffer]] and all child classes.
*
* @param numEntries Total number of elements that can be stored in the 'data' RAM
* @param numLists Maximum number of linked lists
* @param numBeats Maximum number of beats per entry
*/
case class ReservableListBufferParameters(numEntries: Int, numLists: Int, numBeats: Int) {
// Avoid zero-width wires when we call 'log2Ceil'
val entryBits = if (numEntries == 1) 1 else log2Ceil(numEntries)
val listBits = if (numLists == 1) 1 else log2Ceil(numLists)
val beatBits = if (numBeats == 1) 1 else log2Ceil(numBeats)
}
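// For instance, numEntries = 32, numLists = 16 and numBeats = 8 give entryBits = 5,
// listBits = 4 and beatBits = 3; the max-with-1 guards only matter in the degenerate
// single-entry, single-list or single-beat configurations.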
case class UnsafeAXI4ToTLNode(numTlTxns: Int, wcorrupt: Boolean)(implicit valName: ValName)
extends MixedAdapterNode(AXI4Imp, TLImp)(
dFn = { case mp =>
TLMasterPortParameters.v2(
masters = mp.masters.zipWithIndex.map { case (m, i) =>
// Support 'numTlTxns' read requests and 'numTlTxns' write requests at once.
val numSourceIds = numTlTxns * 2
TLMasterParameters.v2(
name = m.name,
sourceId = IdRange(i * numSourceIds, (i + 1) * numSourceIds),
nodePath = m.nodePath
)
},
echoFields = mp.echoFields,
requestFields = AMBAProtField() +: mp.requestFields,
responseKeys = mp.responseKeys
)
},
uFn = { mp =>
AXI4SlavePortParameters(
slaves = mp.managers.map { m =>
val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits))
AXI4SlaveParameters(
address = m.address,
resources = m.resources,
regionType = m.regionType,
executable = m.executable,
nodePath = m.nodePath,
supportsWrite = m.supportsPutPartial.intersect(maxXfer),
supportsRead = m.supportsGet.intersect(maxXfer),
interleavedId = Some(0) // TL2 never interleaves D beats
)
},
beatBytes = mp.beatBytes,
minLatency = mp.minLatency,
responseFields = mp.responseFields,
requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt)
)
}
)
class UnsafeAXI4ToTL(numTlTxns: Int, wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule {
require(numTlTxns >= 1)
require(isPow2(numTlTxns), s"Number of TileLink transactions ($numTlTxns) must be a power of 2")
val node = UnsafeAXI4ToTLNode(numTlTxns, wcorrupt)
lazy val module = new LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
edgeIn.master.masters.foreach { m =>
require(m.aligned, "AXI4ToTL requires aligned requests")
}
val numIds = edgeIn.master.endId
val beatBytes = edgeOut.slave.beatBytes
val maxTransfer = edgeOut.slave.maxTransfer
val maxBeats = maxTransfer / beatBytes
// Look for an Error device to redirect bad requests
val errorDevs = edgeOut.slave.managers.filter(_.nodePath.last.lazyModule.className == "TLError")
require(!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. One must be instantiated.")
val errorDev = errorDevs.maxBy(_.maxTransfer)
val errorDevAddr = errorDev.address.head.base
require(
errorDev.supportsPutPartial.contains(maxTransfer),
s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support $maxTransfer"
)
require(
errorDev.supportsGet.contains(maxTransfer),
s"Error device supports ${errorDev.supportsGet} Get but must support $maxTransfer"
)
// All of the read-response reordering logic.
val listBufData = new ReorderData(beatBytes * 8, edgeIn.bundle.respBits, out.d.bits.user.fields)
val listBufParams = ReservableListBufferParameters(numTlTxns, numIds, maxBeats)
val listBuffer = if (numTlTxns > 1) {
Module(new ReservableListBuffer(listBufData, listBufParams))
} else {
Module(new PassthroughListBuffer(listBufData, listBufParams))
}
// To differentiate between read and write transaction IDs, we will set the MSB of the TileLink 'source' field to
// 0 for read requests and 1 for write requests.
val isReadSourceBit = 0.U(1.W)
val isWriteSourceBit = 1.U(1.W)
/* Read request logic */
val rOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val rBytes1 = in.ar.bits.bytes1()
val rSize = OH1ToUInt(rBytes1)
val rOk = edgeOut.slave.supportsGetSafe(in.ar.bits.addr, rSize)
val rId = if (numTlTxns > 1) {
Cat(isReadSourceBit, listBuffer.ioReservedIndex)
} else {
isReadSourceBit
}
val rAddr = Mux(rOk, in.ar.bits.addr, errorDevAddr.U | in.ar.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Indicates if there are still valid TileLink source IDs left to use.
val canIssueR = listBuffer.ioReserve.ready
listBuffer.ioReserve.bits := in.ar.bits.id
listBuffer.ioReserve.valid := in.ar.valid && rOut.ready
in.ar.ready := rOut.ready && canIssueR
rOut.valid := in.ar.valid && canIssueR
rOut.bits :<= edgeOut.Get(rId, rAddr, rSize)._2
rOut.bits.user :<= in.ar.bits.user
rOut.bits.user.lift(AMBAProt).foreach { rProt =>
rProt.privileged := in.ar.bits.prot(0)
rProt.secure := !in.ar.bits.prot(1)
rProt.fetch := in.ar.bits.prot(2)
rProt.bufferable := in.ar.bits.cache(0)
rProt.modifiable := in.ar.bits.cache(1)
rProt.readalloc := in.ar.bits.cache(2)
rProt.writealloc := in.ar.bits.cache(3)
}
/* Write request logic */
// Strip off the MSB, which identifies the transaction as read vs write.
val strippedResponseSourceId = if (numTlTxns > 1) {
out.d.bits.source((out.d.bits.source).getWidth - 2, 0)
} else {
// When there's only 1 TileLink transaction allowed for read/write, then this field is always 0.
0.U(1.W)
}
// Track when a write request burst is in progress.
val writeBurstBusy = RegInit(false.B)
when(in.w.fire) {
writeBurstBusy := !in.w.bits.last
}
val usedWriteIds = RegInit(0.U(numTlTxns.W))
val canIssueW = !usedWriteIds.andR
val usedWriteIdsSet = WireDefault(0.U(numTlTxns.W))
val usedWriteIdsClr = WireDefault(0.U(numTlTxns.W))
usedWriteIds := (usedWriteIds & ~usedWriteIdsClr) | usedWriteIdsSet
// Since write responses can show up in the middle of a write burst, we need to ensure the write burst ID doesn't
// change mid-burst.
val freeWriteIdOHRaw = Wire(UInt(numTlTxns.W))
val freeWriteIdOH = freeWriteIdOHRaw holdUnless !writeBurstBusy
val freeWriteIdIndex = OHToUInt(freeWriteIdOH)
freeWriteIdOHRaw := ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds
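      // ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds isolates the least-significant
      // cleared bit of 'usedWriteIds', i.e. the lowest free write ID. For example,
      // with usedWriteIds = b0110: ~used = b1001, leftOR = b1111, << 1 = b1110,
      // inverted = b0001, and ANDing with ~used leaves b0001, selecting ID 0.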
val wOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val wBytes1 = in.aw.bits.bytes1()
val wSize = OH1ToUInt(wBytes1)
val wOk = edgeOut.slave.supportsPutPartialSafe(in.aw.bits.addr, wSize)
val wId = if (numTlTxns > 1) {
Cat(isWriteSourceBit, freeWriteIdIndex)
} else {
isWriteSourceBit
}
val wAddr = Mux(wOk, in.aw.bits.addr, errorDevAddr.U | in.aw.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Here, we're taking advantage of the Irrevocable behavior of AXI4 (once 'valid' is asserted it must remain
// asserted until the handshake occurs). We will only accept W-channel beats when we have a valid AW beat, but
// the AW-channel beat won't fire until the final W-channel beat fires. So, we have stable address/size/strb
// bits during a W-channel burst.
in.aw.ready := wOut.ready && in.w.valid && in.w.bits.last && canIssueW
in.w.ready := wOut.ready && in.aw.valid && canIssueW
wOut.valid := in.aw.valid && in.w.valid && canIssueW
wOut.bits :<= edgeOut.Put(wId, wAddr, wSize, in.w.bits.data, in.w.bits.strb)._2
in.w.bits.user.lift(AMBACorrupt).foreach { wOut.bits.corrupt := _ }
wOut.bits.user :<= in.aw.bits.user
wOut.bits.user.lift(AMBAProt).foreach { wProt =>
wProt.privileged := in.aw.bits.prot(0)
wProt.secure := !in.aw.bits.prot(1)
wProt.fetch := in.aw.bits.prot(2)
wProt.bufferable := in.aw.bits.cache(0)
wProt.modifiable := in.aw.bits.cache(1)
wProt.readalloc := in.aw.bits.cache(2)
wProt.writealloc := in.aw.bits.cache(3)
}
// Merge the AXI4 read/write requests into the TL-A channel.
TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, rOut), (in.aw.bits.len, wOut))
/* Read/write response logic */
val okB = Wire(Irrevocable(new AXI4BundleB(edgeIn.bundle)))
val okR = Wire(Irrevocable(new AXI4BundleR(edgeIn.bundle)))
val dResp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY)
val dHasData = edgeOut.hasData(out.d.bits)
val (_dFirst, dLast, _dDone, dCount) = edgeOut.count(out.d)
val dNumBeats1 = edgeOut.numBeats1(out.d.bits)
// Handle cases where writeack arrives before write is done
val writeEarlyAck = (UIntToOH(strippedResponseSourceId) & usedWriteIds) === 0.U
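      // A write acknowledgement can race ahead of the request's final W beat (which is
      // what sets the ID in 'usedWriteIds'); when that happens the D beat is
      // back-pressured and the B response withheld until the ID is marked as used.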
out.d.ready := Mux(dHasData, listBuffer.ioResponse.ready, okB.ready && !writeEarlyAck)
listBuffer.ioDataOut.ready := okR.ready
okR.valid := listBuffer.ioDataOut.valid
okB.valid := out.d.valid && !dHasData && !writeEarlyAck
listBuffer.ioResponse.valid := out.d.valid && dHasData
listBuffer.ioResponse.bits.index := strippedResponseSourceId
listBuffer.ioResponse.bits.data.data := out.d.bits.data
listBuffer.ioResponse.bits.data.resp := dResp
listBuffer.ioResponse.bits.data.last := dLast
listBuffer.ioResponse.bits.data.user :<= out.d.bits.user
listBuffer.ioResponse.bits.count := dCount
listBuffer.ioResponse.bits.numBeats1 := dNumBeats1
okR.bits.id := listBuffer.ioDataOut.bits.listIndex
okR.bits.data := listBuffer.ioDataOut.bits.payload.data
okR.bits.resp := listBuffer.ioDataOut.bits.payload.resp
okR.bits.last := listBuffer.ioDataOut.bits.payload.last
okR.bits.user :<= listBuffer.ioDataOut.bits.payload.user
// Upon the final beat in a write request, record a mapping from TileLink source ID to AXI write ID. Upon a write
// response, mark the write transaction as complete.
val writeIdMap = Mem(numTlTxns, UInt(log2Ceil(numIds).W))
val writeResponseId = writeIdMap.read(strippedResponseSourceId)
when(wOut.fire) {
writeIdMap.write(freeWriteIdIndex, in.aw.bits.id)
}
when(edgeOut.done(wOut)) {
usedWriteIdsSet := freeWriteIdOH
}
when(okB.fire) {
usedWriteIdsClr := UIntToOH(strippedResponseSourceId, numTlTxns)
}
okB.bits.id := writeResponseId
okB.bits.resp := dResp
okB.bits.user :<= out.d.bits.user
// AXI4 needs irrevocable behaviour
in.r <> Queue.irrevocable(okR, 1, flow = true)
in.b <> Queue.irrevocable(okB, 1, flow = true)
// Unused channels
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
/* Alignment constraints. The AXI4Fragmenter should guarantee all of these constraints. */
def checkRequest[T <: AXI4BundleA](a: IrrevocableIO[T], reqType: String): Unit = {
val lReqType = reqType.toLowerCase
when(a.valid) {
assert(a.bits.len < maxBeats.U, s"$reqType burst length (%d) must be less than $maxBeats", a.bits.len + 1.U)
// Narrow transfers and FIXED bursts must be single-beat bursts.
when(a.bits.len =/= 0.U) {
assert(
a.bits.size === log2Ceil(beatBytes).U,
s"Narrow $lReqType transfers (%d < $beatBytes bytes) can't be multi-beat bursts (%d beats)",
1.U << a.bits.size,
a.bits.len + 1.U
)
assert(
a.bits.burst =/= AXI4Parameters.BURST_FIXED,
s"Fixed $lReqType bursts can't be multi-beat bursts (%d beats)",
a.bits.len + 1.U
)
}
// Furthermore, the transfer size (a.bits.bytes1() + 1.U) must be naturally-aligned to the address (in
// particular, during both WRAP and INCR bursts), but this constraint is already checked by TileLink
// Monitors. Note that this alignment requirement means that WRAP bursts are identical to INCR bursts.
}
}
checkRequest(in.ar, "Read")
checkRequest(in.aw, "Write")
}
}
}
object UnsafeAXI4ToTL {
def apply(numTlTxns: Int = 1, wcorrupt: Boolean = true)(implicit p: Parameters) = {
val axi42tl = LazyModule(new UnsafeAXI4ToTL(numTlTxns, wcorrupt))
axi42tl.node
}
}
/* ReservableListBuffer logic, and associated classes. */
class ResponsePayload[T <: Data](val data: T, val params: ReservableListBufferParameters) extends Bundle {
val index = UInt(params.entryBits.W)
val count = UInt(params.beatBits.W)
val numBeats1 = UInt(params.beatBits.W)
}
class DataOutPayload[T <: Data](val payload: T, val params: ReservableListBufferParameters) extends Bundle {
val listIndex = UInt(params.listBits.W)
}
/** Abstract base class to unify [[ReservableListBuffer]] and [[PassthroughListBuffer]]. */
abstract class BaseReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends Module {
require(params.numEntries > 0)
require(params.numLists > 0)
val ioReserve = IO(Flipped(Decoupled(UInt(params.listBits.W))))
val ioReservedIndex = IO(Output(UInt(params.entryBits.W)))
val ioResponse = IO(Flipped(Decoupled(new ResponsePayload(gen, params))))
val ioDataOut = IO(Decoupled(new DataOutPayload(gen, params)))
}
/** A modified version of 'ListBuffer' from 'sifive/block-inclusivecache-sifive'. This module forces users to reserve
* linked list entries (through the 'ioReserve' port) before writing data into those linked lists (through the
* 'ioResponse' port). Each response is tagged to indicate which linked list it is written into. The responses for a
* given linked list can come back out-of-order, but they will be read out through the 'ioDataOut' port in-order.
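  * For example, if entries for requests A and then B are reserved on the same list and
  * B's response returns first, B is parked in the internal 'data' SRAM, A's later
  * response streams straight through to 'ioDataOut', and B is unwound immediately
  * afterwards, so the list still emits A before B.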
*
* ==Constructor==
* @param gen Chisel type of linked list data element
* @param params Other parameters
*
* ==Module IO==
* @param ioReserve Index of list to reserve a new element in
* @param ioReservedIndex Index of the entry that was reserved in the linked list, valid when 'ioReserve.fire'
* @param ioResponse Payload containing response data and linked-list-entry index
* @param ioDataOut Payload containing data read from response linked list and linked list index
*/
class ReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
val valid = RegInit(0.U(params.numLists.W))
val head = Mem(params.numLists, UInt(params.entryBits.W))
val tail = Mem(params.numLists, UInt(params.entryBits.W))
val used = RegInit(0.U(params.numEntries.W))
val next = Mem(params.numEntries, UInt(params.entryBits.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val dataMems = Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) }
val dataIsPresent = RegInit(0.U(params.numEntries.W))
val beats = Mem(params.numEntries, UInt(params.beatBits.W))
// The 'data' SRAM should be single-ported (read-or-write), since dual-ported SRAMs are significantly slower.
val dataMemReadEnable = WireDefault(false.B)
val dataMemWriteEnable = WireDefault(false.B)
assert(!(dataMemReadEnable && dataMemWriteEnable))
// 'freeOH' has a single bit set, which is the least-significant bit that is cleared in 'used'. So, it's the
// lowest-index entry in the 'data' RAM which is free.
val freeOH = Wire(UInt(params.numEntries.W))
val freeIndex = OHToUInt(freeOH)
freeOH := ~(leftOR(~used) << 1) & ~used
ioReservedIndex := freeIndex
val validSet = WireDefault(0.U(params.numLists.W))
val validClr = WireDefault(0.U(params.numLists.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
val dataIsPresentSet = WireDefault(0.U(params.numEntries.W))
val dataIsPresentClr = WireDefault(0.U(params.numEntries.W))
valid := (valid & ~validClr) | validSet
used := (used & ~usedClr) | usedSet
dataIsPresent := (dataIsPresent & ~dataIsPresentClr) | dataIsPresentSet
/* Reservation logic signals */
val reserveTail = Wire(UInt(params.entryBits.W))
val reserveIsValid = Wire(Bool())
/* Response logic signals */
val responseIndex = Wire(UInt(params.entryBits.W))
val responseListIndex = Wire(UInt(params.listBits.W))
val responseHead = Wire(UInt(params.entryBits.W))
val responseTail = Wire(UInt(params.entryBits.W))
val nextResponseHead = Wire(UInt(params.entryBits.W))
val nextDataIsPresent = Wire(Bool())
val isResponseInOrder = Wire(Bool())
val isEndOfList = Wire(Bool())
val isLastBeat = Wire(Bool())
val isLastResponseBeat = Wire(Bool())
val isLastUnwindBeat = Wire(Bool())
/* Reservation logic */
reserveTail := tail.read(ioReserve.bits)
reserveIsValid := valid(ioReserve.bits)
ioReserve.ready := !used.andR
// When we want to append-to and destroy the same linked list on the same cycle, we need to take special care that we
// actually start a new list, rather than appending to a list that's about to disappear.
val reserveResponseSameList = ioReserve.bits === responseListIndex
val appendToAndDestroyList =
ioReserve.fire && ioDataOut.fire && reserveResponseSameList && isEndOfList && isLastBeat
when(ioReserve.fire) {
validSet := UIntToOH(ioReserve.bits, params.numLists)
usedSet := freeOH
when(reserveIsValid && !appendToAndDestroyList) {
next.write(reserveTail, freeIndex)
}.otherwise {
head.write(ioReserve.bits, freeIndex)
}
tail.write(ioReserve.bits, freeIndex)
map.write(freeIndex, ioReserve.bits)
}
/* Response logic */
// The majority of the response logic (reading from and writing to the various RAMs) is common between the
// response-from-IO case (ioResponse.fire) and the response-from-unwind case (unwindDataIsValid).
// The read from the 'next' RAM should be performed at the address given by 'responseHead'. However, we only use the
// 'nextResponseHead' signal when 'isResponseInOrder' is asserted (both in the response-from-IO and
// response-from-unwind cases), which implies that 'responseHead' equals 'responseIndex'. 'responseHead' comes after
// two back-to-back RAM reads, so indexing into the 'next' RAM with 'responseIndex' is much quicker.
responseHead := head.read(responseListIndex)
responseTail := tail.read(responseListIndex)
nextResponseHead := next.read(responseIndex)
nextDataIsPresent := dataIsPresent(nextResponseHead)
// Note that when 'isEndOfList' is asserted, 'nextResponseHead' (and therefore 'nextDataIsPresent') is invalid, since
// there isn't a next element in the linked list.
isResponseInOrder := responseHead === responseIndex
isEndOfList := responseHead === responseTail
isLastResponseBeat := ioResponse.bits.count === ioResponse.bits.numBeats1
// When a response's last beat is sent to the output channel, mark it as completed. This can happen in two
// situations:
// 1. We receive an in-order response, which travels straight from 'ioResponse' to 'ioDataOut'. The 'data' SRAM
// reservation was never needed.
// 2. An entry is read out of the 'data' SRAM (within the unwind FSM).
when(ioDataOut.fire && isLastBeat) {
// Mark the reservation as no-longer-used.
usedClr := UIntToOH(responseIndex, params.numEntries)
// If the response is in-order, then we're popping an element from this linked list.
when(isEndOfList) {
// Once we pop the last element from a linked list, mark it as no-longer-present.
validClr := UIntToOH(responseListIndex, params.numLists)
}.otherwise {
// Move the linked list's head pointer to the new head pointer.
head.write(responseListIndex, nextResponseHead)
}
}
// If we get an out-of-order response, then stash it in the 'data' SRAM for later unwinding.
when(ioResponse.fire && !isResponseInOrder) {
dataMemWriteEnable := true.B
when(isLastResponseBeat) {
dataIsPresentSet := UIntToOH(ioResponse.bits.index, params.numEntries)
beats.write(ioResponse.bits.index, ioResponse.bits.numBeats1)
}
}
// Use the 'ioResponse.bits.count' index (AKA the beat number) to select which 'data' SRAM to write to.
val responseCountOH = UIntToOH(ioResponse.bits.count, params.numBeats)
(responseCountOH.asBools zip dataMems) foreach { case (select, seqMem) =>
when(select && dataMemWriteEnable) {
seqMem.write(ioResponse.bits.index, ioResponse.bits.data)
}
}
/* Response unwind logic */
// Unwind FSM state definitions
val sIdle :: sUnwinding :: Nil = Enum(2)
val unwindState = RegInit(sIdle)
val busyUnwinding = unwindState === sUnwinding
val startUnwind = Wire(Bool())
val stopUnwind = Wire(Bool())
when(startUnwind) {
unwindState := sUnwinding
}.elsewhen(stopUnwind) {
unwindState := sIdle
}
assert(!(startUnwind && stopUnwind))
// Start the unwind FSM when there is an old out-of-order response stored in the 'data' SRAM that is now about to
// become the next in-order response. As noted previously, when 'isEndOfList' is asserted, 'nextDataIsPresent' is
// invalid.
//
// Note that since an in-order response from 'ioResponse' to 'ioDataOut' starts the unwind FSM, we don't have to
// worry about overwriting the 'data' SRAM's output when we start the unwind FSM.
startUnwind := ioResponse.fire && isResponseInOrder && isLastResponseBeat && !isEndOfList && nextDataIsPresent
// Stop the unwind FSM when the output channel consumes the final beat of an element from the unwind FSM, and one of
// two things happens:
// 1. We're still waiting for the next in-order response for this list (!nextDataIsPresent)
// 2. There are no more outstanding responses in this list (isEndOfList)
//
// Including 'busyUnwinding' ensures this is a single-cycle pulse, and it never fires while in-order transactions are
// passing from 'ioResponse' to 'ioDataOut'.
stopUnwind := busyUnwinding && ioDataOut.fire && isLastUnwindBeat && (!nextDataIsPresent || isEndOfList)
val isUnwindBurstOver = Wire(Bool())
val startNewBurst = startUnwind || (isUnwindBurstOver && dataMemReadEnable)
// Track the number of beats left to unwind for each list entry. At the start of a new burst, we flop the number of
// beats in this burst (minus 1) into 'unwindBeats1', and we reset the 'beatCounter' counter. With each beat, we
// increment 'beatCounter' until it reaches 'unwindBeats1'.
val unwindBeats1 = Reg(UInt(params.beatBits.W))
val nextBeatCounter = Wire(UInt(params.beatBits.W))
val beatCounter = RegNext(nextBeatCounter)
isUnwindBurstOver := beatCounter === unwindBeats1
when(startNewBurst) {
unwindBeats1 := beats.read(nextResponseHead)
nextBeatCounter := 0.U
}.elsewhen(dataMemReadEnable) {
nextBeatCounter := beatCounter + 1.U
}.otherwise {
nextBeatCounter := beatCounter
}
// When unwinding, feed the next linked-list head pointer (read out of the 'next' RAM) back so we can unwind the next
// entry in this linked list. Only update the pointer when we're actually moving to the next 'data' SRAM entry (which
// happens at the start of reading a new stored burst).
val unwindResponseIndex = RegEnable(nextResponseHead, startNewBurst)
responseIndex := Mux(busyUnwinding, unwindResponseIndex, ioResponse.bits.index)
// Hold 'nextResponseHead' static while we're in the middle of unwinding a multi-beat burst entry. We don't want the
// SRAM read address to shift while reading beats from a burst. Note that this is identical to 'nextResponseHead
// holdUnless startNewBurst', but 'unwindResponseIndex' already implements the 'RegEnable' signal in 'holdUnless'.
val unwindReadAddress = Mux(startNewBurst, nextResponseHead, unwindResponseIndex)
// The 'data' SRAM's output is valid if we read from the SRAM on the previous cycle. The SRAM's output stays valid
// until it is consumed by the output channel (and if we don't read from the SRAM again on that same cycle).
val unwindDataIsValid = RegInit(false.B)
when(dataMemReadEnable) {
unwindDataIsValid := true.B
}.elsewhen(ioDataOut.fire) {
unwindDataIsValid := false.B
}
isLastUnwindBeat := isUnwindBurstOver && unwindDataIsValid
// Indicates if this is the last beat for both 'ioResponse'-to-'ioDataOut' and unwind-to-'ioDataOut' beats.
isLastBeat := Mux(busyUnwinding, isLastUnwindBeat, isLastResponseBeat)
// Select which SRAM to read from based on the beat counter.
val dataOutputVec = Wire(Vec(params.numBeats, gen))
val nextBeatCounterOH = UIntToOH(nextBeatCounter, params.numBeats)
(nextBeatCounterOH.asBools zip dataMems).zipWithIndex foreach { case ((select, seqMem), i) =>
dataOutputVec(i) := seqMem.read(unwindReadAddress, select && dataMemReadEnable)
}
// Select the current 'data' SRAM output beat, and save the output in a register in case we're being back-pressured
// by 'ioDataOut'. This implements the functionality of 'readAndHold', but only on the single SRAM we're reading
// from.
val dataOutput = dataOutputVec(beatCounter) holdUnless RegNext(dataMemReadEnable)
// Mark 'data' burst entries as no-longer-present as they get read out of the SRAM.
when(dataMemReadEnable) {
dataIsPresentClr := UIntToOH(unwindReadAddress, params.numEntries)
}
// As noted above, when starting the unwind FSM, we know the 'data' SRAM's output isn't valid, so it's safe to issue
// a read command. Otherwise, only issue an SRAM read when the next 'unwindState' is 'sUnwinding', and if we know
// we're not going to overwrite the SRAM's current output (the SRAM output is already valid, and it's not going to be
// consumed by the output channel).
val dontReadFromDataMem = unwindDataIsValid && !ioDataOut.ready
dataMemReadEnable := startUnwind || (busyUnwinding && !stopUnwind && !dontReadFromDataMem)
// While unwinding, prevent new reservations from overwriting the current 'map' entry that we're using. We need
// 'responseListIndex' to be coherent for the entire unwind process.
val rawResponseListIndex = map.read(responseIndex)
val unwindResponseListIndex = RegEnable(rawResponseListIndex, startNewBurst)
responseListIndex := Mux(busyUnwinding, unwindResponseListIndex, rawResponseListIndex)
// Accept responses either when they can be passed through to the output channel, or if they're out-of-order and are
// just going to be stashed in the 'data' SRAM. Never accept a response payload when we're busy unwinding, since that
// could result in reading from and writing to the 'data' SRAM in the same cycle, and we want that SRAM to be
// single-ported.
ioResponse.ready := (ioDataOut.ready || !isResponseInOrder) && !busyUnwinding
// Either pass an in-order response to the output channel, or data read from the unwind FSM.
ioDataOut.valid := Mux(busyUnwinding, unwindDataIsValid, ioResponse.valid && isResponseInOrder)
ioDataOut.bits.listIndex := responseListIndex
ioDataOut.bits.payload := Mux(busyUnwinding, dataOutput, ioResponse.bits.data)
// It's an error to get a response that isn't associated with a valid linked list.
when(ioResponse.fire || unwindDataIsValid) {
assert(
valid(responseListIndex),
"No linked list exists at index %d, mapped from %d",
responseListIndex,
responseIndex
)
}
when(busyUnwinding && dataMemReadEnable) {
assert(isResponseInOrder, "Unwind FSM must read entries from SRAM in order")
}
}
/** Specialized version of [[ReservableListBuffer]] for the case of numEntries == 1.
*
* Much of the complex logic in [[ReservableListBuffer]] can disappear in this case. For instance, we don't have to
* reorder any responses, or store any linked lists.
*/
class PassthroughListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
require(params.numEntries == 1, s"PassthroughListBuffer is only valid when 'numEntries' (${params.numEntries}) is 1")
val used = RegInit(0.U(params.numEntries.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
used := (used & ~usedClr) | usedSet
ioReserve.ready := used === 0.U
// Store which list index was reserved, we need to return this value when we get a response.
when(ioReserve.fire) {
usedSet := 1.U
map.write(0.U, ioReserve.bits)
}
// There's only one valid linked list entry, which is at index 0.
ioReservedIndex := 0.U
val isLastResponseBeat = ioResponse.bits.count === ioResponse.bits.numBeats1
// Mark the linked list as empty when we get the last beat in a response.
// Note that 'ioResponse.fire === ioDataOut.fire'.
when(ioResponse.fire && isLastResponseBeat) {
usedClr := 1.U
}
// Always pass the response data straight through, since we never need to reorder the response data.
ioDataOut.bits.listIndex := map.read(0.U)
ioDataOut.bits.payload := ioResponse.bits.data
ioDataOut.valid := ioResponse.valid
ioResponse.ready := ioDataOut.ready
}
| module dataMems_324( // @[UnsafeAXI4ToTL.scala:365:62]
input [4:0] R0_addr,
input R0_en,
input R0_clk,
output [66:0] R0_data,
input [4:0] W0_addr,
input W0_en,
input W0_clk,
input [66:0] W0_data
);
dataMems_0_ext dataMems_0_ext ( // @[UnsafeAXI4ToTL.scala:365:62]
.R0_addr (R0_addr),
.R0_en (R0_en),
.R0_clk (R0_clk),
.R0_data (R0_data),
.W0_addr (W0_addr),
.W0_en (W0_en),
.W0_clk (W0_clk),
.W0_data (W0_data)
); // @[UnsafeAXI4ToTL.scala:365:62]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File BankedStore.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util.DescribedSRAM
import scala.math.{max, min}
abstract class BankedStoreAddress(val inner: Boolean, params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val noop = Bool() // do not actually use the SRAMs, just block their use
val way = UInt(params.wayBits.W)
val set = UInt(params.setBits.W)
val beat = UInt((if (inner) params.innerBeatBits else params.outerBeatBits).W)
val mask = UInt((if (inner) params.innerMaskBits else params.outerMaskBits).W)
}
trait BankedStoreRW
{
val write = Bool()
}
class BankedStoreOuterAddress(params: InclusiveCacheParameters) extends BankedStoreAddress(false, params)
class BankedStoreInnerAddress(params: InclusiveCacheParameters) extends BankedStoreAddress(true, params)
class BankedStoreInnerAddressRW(params: InclusiveCacheParameters) extends BankedStoreInnerAddress(params) with BankedStoreRW
abstract class BankedStoreData(val inner: Boolean, params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val data = UInt(((if (inner) params.inner.manager.beatBytes else params.outer.manager.beatBytes)*8).W)
}
class BankedStoreOuterData(params: InclusiveCacheParameters) extends BankedStoreData(false, params)
class BankedStoreInnerData(params: InclusiveCacheParameters) extends BankedStoreData(true, params)
class BankedStoreInnerPoison(params: InclusiveCacheParameters) extends BankedStoreInnerData(params)
class BankedStoreOuterPoison(params: InclusiveCacheParameters) extends BankedStoreOuterData(params)
class BankedStoreInnerDecoded(params: InclusiveCacheParameters) extends BankedStoreInnerData(params)
class BankedStoreOuterDecoded(params: InclusiveCacheParameters) extends BankedStoreOuterData(params)
class BankedStore(params: InclusiveCacheParameters) extends Module
{
val io = IO(new Bundle {
val sinkC_adr = Flipped(Decoupled(new BankedStoreInnerAddress(params)))
val sinkC_dat = Flipped(new BankedStoreInnerPoison(params))
val sinkD_adr = Flipped(Decoupled(new BankedStoreOuterAddress(params)))
val sinkD_dat = Flipped(new BankedStoreOuterPoison(params))
val sourceC_adr = Flipped(Decoupled(new BankedStoreOuterAddress(params)))
val sourceC_dat = new BankedStoreOuterDecoded(params)
val sourceD_radr = Flipped(Decoupled(new BankedStoreInnerAddress(params)))
val sourceD_rdat = new BankedStoreInnerDecoded(params)
val sourceD_wadr = Flipped(Decoupled(new BankedStoreInnerAddress(params)))
val sourceD_wdat = Flipped(new BankedStoreInnerPoison(params))
})
val innerBytes = params.inner.manager.beatBytes
val outerBytes = params.outer.manager.beatBytes
val rowBytes = params.micro.portFactor * max(innerBytes, outerBytes)
require (rowBytes < params.cache.sizeBytes)
val rowEntries = params.cache.sizeBytes / rowBytes
val rowBits = log2Ceil(rowEntries)
val numBanks = rowBytes / params.micro.writeBytes
val codeBits = 8*params.micro.writeBytes
val cc_banks = Seq.tabulate(numBanks) {
i =>
DescribedSRAM(
name = s"cc_banks_$i",
desc = "Banked Store",
size = rowEntries,
data = UInt(codeBits.W)
)
}
// These constraints apply on the port priorities:
// sourceC > sinkD outgoing Release > incoming Grant (we start eviction+refill concurrently)
// sinkC > sourceC incoming ProbeAck > outgoing ProbeAck (we delay probeack writeback by 1 cycle for QoR)
// sinkC > sourceDr incoming ProbeAck > SourceD read (we delay probeack writeback by 1 cycle for QoR)
// sourceDw > sourceDr modified data visible on next cycle (needed to ensure SourceD forward progress)
// sinkC > sourceC inner ProbeAck > outer ProbeAck (make wormhole routing possible [not yet implemented])
// sinkC&D > sourceD* beat arrival > beat read|update (make wormhole routing possible [not yet implemented])
// Combining these restrictions yields a priority scheme of:
// sinkC > sourceC > sinkD > sourceDw > sourceDr
// ^^^^^^^^^^^^^^^ outer interface
// Requests have different port widths, but we don't want to allow cutting in line.
// Suppose we have requests A > B > C requesting ports --A-, --BB, ---C.
// The correct arbitration is to allow --A- only, not --AC.
// Obviously --A-, BB--, ---C should still be resolved to BBAC.
class Request extends Bundle {
val wen = Bool()
val index = UInt(rowBits.W)
val bankSel = UInt(numBanks.W)
val bankSum = UInt(numBanks.W) // OR of all higher priority bankSels
val bankEn = UInt(numBanks.W) // ports actually activated by request
val data = Vec(numBanks, UInt(codeBits.W))
}
def req[T <: BankedStoreAddress](b: DecoupledIO[T], write: Bool, d: UInt): Request = {
val beatBytes = if (b.bits.inner) innerBytes else outerBytes
val ports = beatBytes / params.micro.writeBytes
val bankBits = log2Ceil(numBanks / ports)
val words = Seq.tabulate(ports) { i =>
val data = d((i + 1) * 8 * params.micro.writeBytes - 1, i * 8 * params.micro.writeBytes)
data
}
val a = if (params.cache.blockBytes == beatBytes) Cat(b.bits.way, b.bits.set) else Cat(b.bits.way, b.bits.set, b.bits.beat)
val m = b.bits.mask
val out = Wire(new Request)
val select = UIntToOH(a(bankBits-1, 0), numBanks/ports)
val ready = Cat(Seq.tabulate(numBanks/ports) { i => !(out.bankSum((i+1)*ports-1, i*ports) & m).orR } .reverse)
b.ready := ready(a(bankBits-1, 0))
out.wen := write
out.index := a >> bankBits
out.bankSel := Mux(b.valid, FillInterleaved(ports, select) & Fill(numBanks/ports, m), 0.U)
out.bankEn := Mux(b.bits.noop, 0.U, out.bankSel & FillInterleaved(ports, ready))
out.data := Seq.fill(numBanks/ports) { words }.flatten
out
}
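  // In the generated instance below (writeBytes = 8, numBanks = 8): an inner request
  // (16-byte beats) spans ports = 2 adjacent banks selected by the low bankBits = 2
  // bits of the concatenated way/set/beat address, with the 2-bit 'mask' enabling
  // individual banks, while an outer request (8-byte beats) maps onto a single bank
  // selected by the low 3 address bits.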
val innerData = 0.U((8*innerBytes).W)
val outerData = 0.U((8*outerBytes).W)
val W = true.B
val R = false.B
val sinkC_req = req(io.sinkC_adr, W, io.sinkC_dat.data)
val sinkD_req = req(io.sinkD_adr, W, io.sinkD_dat.data)
val sourceC_req = req(io.sourceC_adr, R, outerData)
val sourceD_rreq = req(io.sourceD_radr, R, innerData)
val sourceD_wreq = req(io.sourceD_wadr, W, io.sourceD_wdat.data)
// See the comments above for why this prioritization is used
val reqs = Seq(sinkC_req, sourceC_req, sinkD_req, sourceD_wreq, sourceD_rreq)
// Connect priorities; note that even if a request does not go through due to failing
// to obtain a needed subbank, it still blocks overlapping lower priority requests.
reqs.foldLeft(0.U) { case (sum, req) =>
req.bankSum := sum
req.bankSel | sum
}
// Access the banks
val regout = VecInit(cc_banks.zipWithIndex.map { case (b, i) =>
val en = reqs.map(_.bankEn(i)).reduce(_||_)
val sel = reqs.map(_.bankSel(i))
val wen = PriorityMux(sel, reqs.map(_.wen))
val idx = PriorityMux(sel, reqs.map(_.index))
val data= PriorityMux(sel, reqs.map(_.data(i)))
when (wen && en) { b.write(idx, data) }
RegEnable(b.read(idx, !wen && en), RegNext(!wen && en))
})
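  // Bank reads take two cycles end to end: the SyncReadMem returns data one cycle
  // after the address, and RegEnable registers it for a second cycle, which is why the
  // sourceC/sourceD select signals below are delayed by two RegNext stages before the
  // output muxes.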
val regsel_sourceC = RegNext(RegNext(sourceC_req.bankEn))
val regsel_sourceD = RegNext(RegNext(sourceD_rreq.bankEn))
val decodeC = regout.zipWithIndex.map {
case (r, i) => Mux(regsel_sourceC(i), r, 0.U)
}.grouped(outerBytes/params.micro.writeBytes).toList.transpose.map(s => s.reduce(_|_))
io.sourceC_dat.data := Cat(decodeC.reverse)
val decodeD = regout.zipWithIndex.map {
// Intentionally not Mux1H and/or an indexed-mux b/c we want it 0 when !sel to save decode power
case (r, i) => Mux(regsel_sourceD(i), r, 0.U)
}.grouped(innerBytes/params.micro.writeBytes).toList.transpose.map(s => s.reduce(_|_))
io.sourceD_rdat.data := Cat(decodeD.reverse)
private def banks = cc_banks.map("\"" + _.pathName + "\"").mkString(",")
def json: String = s"""{"widthBytes":${params.micro.writeBytes},"mem":[${banks}]}"""
}
File DescribedSRAM.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3.{Data, SyncReadMem, Vec}
import chisel3.util.log2Ceil
object DescribedSRAM {
def apply[T <: Data](
name: String,
desc: String,
size: BigInt, // depth
data: T
): SyncReadMem[T] = {
val mem = SyncReadMem(size, data)
mem.suggestName(name)
val granWidth = data match {
case v: Vec[_] => v.head.getWidth
case d => d.getWidth
}
val uid = 0
Annotated.srams(
component = mem,
name = name,
address_width = log2Ceil(size),
data_width = data.getWidth,
depth = size,
description = desc,
write_mask_granularity = granWidth
)
mem
}
}
| module BankedStore_7( // @[BankedStore.scala:59:7]
input clock, // @[BankedStore.scala:59:7]
input reset, // @[BankedStore.scala:59:7]
output io_sinkC_adr_ready, // @[BankedStore.scala:61:14]
input io_sinkC_adr_valid, // @[BankedStore.scala:61:14]
input io_sinkC_adr_bits_noop, // @[BankedStore.scala:61:14]
input [3:0] io_sinkC_adr_bits_way, // @[BankedStore.scala:61:14]
input [10:0] io_sinkC_adr_bits_set, // @[BankedStore.scala:61:14]
input [1:0] io_sinkC_adr_bits_beat, // @[BankedStore.scala:61:14]
input [1:0] io_sinkC_adr_bits_mask, // @[BankedStore.scala:61:14]
input [127:0] io_sinkC_dat_data, // @[BankedStore.scala:61:14]
output io_sinkD_adr_ready, // @[BankedStore.scala:61:14]
input io_sinkD_adr_valid, // @[BankedStore.scala:61:14]
input io_sinkD_adr_bits_noop, // @[BankedStore.scala:61:14]
input [3:0] io_sinkD_adr_bits_way, // @[BankedStore.scala:61:14]
input [10:0] io_sinkD_adr_bits_set, // @[BankedStore.scala:61:14]
input [2:0] io_sinkD_adr_bits_beat, // @[BankedStore.scala:61:14]
input [63:0] io_sinkD_dat_data, // @[BankedStore.scala:61:14]
output io_sourceC_adr_ready, // @[BankedStore.scala:61:14]
input io_sourceC_adr_valid, // @[BankedStore.scala:61:14]
input [3:0] io_sourceC_adr_bits_way, // @[BankedStore.scala:61:14]
input [10:0] io_sourceC_adr_bits_set, // @[BankedStore.scala:61:14]
input [2:0] io_sourceC_adr_bits_beat, // @[BankedStore.scala:61:14]
output [63:0] io_sourceC_dat_data, // @[BankedStore.scala:61:14]
output io_sourceD_radr_ready, // @[BankedStore.scala:61:14]
input io_sourceD_radr_valid, // @[BankedStore.scala:61:14]
input [3:0] io_sourceD_radr_bits_way, // @[BankedStore.scala:61:14]
input [10:0] io_sourceD_radr_bits_set, // @[BankedStore.scala:61:14]
input [1:0] io_sourceD_radr_bits_beat, // @[BankedStore.scala:61:14]
input [1:0] io_sourceD_radr_bits_mask, // @[BankedStore.scala:61:14]
output [127:0] io_sourceD_rdat_data, // @[BankedStore.scala:61:14]
output io_sourceD_wadr_ready, // @[BankedStore.scala:61:14]
input io_sourceD_wadr_valid, // @[BankedStore.scala:61:14]
input [3:0] io_sourceD_wadr_bits_way, // @[BankedStore.scala:61:14]
input [10:0] io_sourceD_wadr_bits_set, // @[BankedStore.scala:61:14]
input [1:0] io_sourceD_wadr_bits_beat, // @[BankedStore.scala:61:14]
input [1:0] io_sourceD_wadr_bits_mask, // @[BankedStore.scala:61:14]
input [127:0] io_sourceD_wdat_data // @[BankedStore.scala:61:14]
);
wire [7:0] sinkC_req_bankSel; // @[BankedStore.scala:128:19]
wire [63:0] _cc_banks_7_RW0_rdata; // @[DescribedSRAM.scala:17:26]
wire [63:0] _cc_banks_6_RW0_rdata; // @[DescribedSRAM.scala:17:26]
wire [63:0] _cc_banks_5_RW0_rdata; // @[DescribedSRAM.scala:17:26]
wire [63:0] _cc_banks_4_RW0_rdata; // @[DescribedSRAM.scala:17:26]
wire [63:0] _cc_banks_3_RW0_rdata; // @[DescribedSRAM.scala:17:26]
wire [63:0] _cc_banks_2_RW0_rdata; // @[DescribedSRAM.scala:17:26]
wire [63:0] _cc_banks_1_RW0_rdata; // @[DescribedSRAM.scala:17:26]
wire [63:0] _cc_banks_0_RW0_rdata; // @[DescribedSRAM.scala:17:26]
wire io_sinkC_adr_valid_0 = io_sinkC_adr_valid; // @[BankedStore.scala:59:7]
wire io_sinkC_adr_bits_noop_0 = io_sinkC_adr_bits_noop; // @[BankedStore.scala:59:7]
wire [3:0] io_sinkC_adr_bits_way_0 = io_sinkC_adr_bits_way; // @[BankedStore.scala:59:7]
wire [10:0] io_sinkC_adr_bits_set_0 = io_sinkC_adr_bits_set; // @[BankedStore.scala:59:7]
wire [1:0] io_sinkC_adr_bits_beat_0 = io_sinkC_adr_bits_beat; // @[BankedStore.scala:59:7]
wire [1:0] io_sinkC_adr_bits_mask_0 = io_sinkC_adr_bits_mask; // @[BankedStore.scala:59:7]
wire [127:0] io_sinkC_dat_data_0 = io_sinkC_dat_data; // @[BankedStore.scala:59:7]
wire io_sinkD_adr_valid_0 = io_sinkD_adr_valid; // @[BankedStore.scala:59:7]
wire io_sinkD_adr_bits_noop_0 = io_sinkD_adr_bits_noop; // @[BankedStore.scala:59:7]
wire [3:0] io_sinkD_adr_bits_way_0 = io_sinkD_adr_bits_way; // @[BankedStore.scala:59:7]
wire [10:0] io_sinkD_adr_bits_set_0 = io_sinkD_adr_bits_set; // @[BankedStore.scala:59:7]
wire [2:0] io_sinkD_adr_bits_beat_0 = io_sinkD_adr_bits_beat; // @[BankedStore.scala:59:7]
wire [63:0] io_sinkD_dat_data_0 = io_sinkD_dat_data; // @[BankedStore.scala:59:7]
wire io_sourceC_adr_valid_0 = io_sourceC_adr_valid; // @[BankedStore.scala:59:7]
wire [3:0] io_sourceC_adr_bits_way_0 = io_sourceC_adr_bits_way; // @[BankedStore.scala:59:7]
wire [10:0] io_sourceC_adr_bits_set_0 = io_sourceC_adr_bits_set; // @[BankedStore.scala:59:7]
wire [2:0] io_sourceC_adr_bits_beat_0 = io_sourceC_adr_bits_beat; // @[BankedStore.scala:59:7]
wire io_sourceD_radr_valid_0 = io_sourceD_radr_valid; // @[BankedStore.scala:59:7]
wire [3:0] io_sourceD_radr_bits_way_0 = io_sourceD_radr_bits_way; // @[BankedStore.scala:59:7]
wire [10:0] io_sourceD_radr_bits_set_0 = io_sourceD_radr_bits_set; // @[BankedStore.scala:59:7]
wire [1:0] io_sourceD_radr_bits_beat_0 = io_sourceD_radr_bits_beat; // @[BankedStore.scala:59:7]
wire [1:0] io_sourceD_radr_bits_mask_0 = io_sourceD_radr_bits_mask; // @[BankedStore.scala:59:7]
wire io_sourceD_wadr_valid_0 = io_sourceD_wadr_valid; // @[BankedStore.scala:59:7]
wire [3:0] io_sourceD_wadr_bits_way_0 = io_sourceD_wadr_bits_way; // @[BankedStore.scala:59:7]
wire [10:0] io_sourceD_wadr_bits_set_0 = io_sourceD_wadr_bits_set; // @[BankedStore.scala:59:7]
wire [1:0] io_sourceD_wadr_bits_beat_0 = io_sourceD_wadr_bits_beat; // @[BankedStore.scala:59:7]
wire [1:0] io_sourceD_wadr_bits_mask_0 = io_sourceD_wadr_bits_mask; // @[BankedStore.scala:59:7]
wire [127:0] io_sourceD_wdat_data_0 = io_sourceD_wdat_data; // @[BankedStore.scala:59:7]
wire [7:0] sinkC_req_bankSum = 8'h0; // @[BankedStore.scala:128:19]
wire [1:0] _sinkC_req_ready_T = 2'h0; // @[BankedStore.scala:131:71]
wire [1:0] _sinkC_req_ready_T_1 = 2'h0; // @[BankedStore.scala:131:96]
wire [1:0] _sinkC_req_ready_T_4 = 2'h0; // @[BankedStore.scala:131:71]
wire [1:0] _sinkC_req_ready_T_5 = 2'h0; // @[BankedStore.scala:131:96]
wire [1:0] _sinkC_req_ready_T_8 = 2'h0; // @[BankedStore.scala:131:71]
wire [1:0] _sinkC_req_ready_T_9 = 2'h0; // @[BankedStore.scala:131:96]
wire [1:0] _sinkC_req_ready_T_12 = 2'h0; // @[BankedStore.scala:131:71]
wire [1:0] _sinkC_req_ready_T_13 = 2'h0; // @[BankedStore.scala:131:96]
wire [1:0] sinkC_req_ready_lo = 2'h3; // @[BankedStore.scala:131:21]
wire [1:0] sinkC_req_ready_hi = 2'h3; // @[BankedStore.scala:131:21]
wire [1:0] _sinkC_req_out_bankEn_T_4 = 2'h3; // @[BankedStore.scala:137:72]
wire [1:0] _sinkC_req_out_bankEn_T_5 = 2'h3; // @[BankedStore.scala:137:72]
wire [1:0] _sinkC_req_out_bankEn_T_6 = 2'h3; // @[BankedStore.scala:137:72]
wire [1:0] _sinkC_req_out_bankEn_T_7 = 2'h3; // @[BankedStore.scala:137:72]
wire [3:0] sinkC_req_ready = 4'hF; // @[BankedStore.scala:131:21, :137:72]
wire [3:0] sinkC_req_out_bankEn_lo = 4'hF; // @[BankedStore.scala:131:21, :137:72]
wire [3:0] sinkC_req_out_bankEn_hi = 4'hF; // @[BankedStore.scala:131:21, :137:72]
wire [7:0] _sinkC_req_out_bankEn_T_8 = 8'hFF; // @[BankedStore.scala:136:71, :137:72]
wire [7:0] _sinkD_req_out_bankSel_T_10 = 8'hFF; // @[BankedStore.scala:136:71, :137:72]
wire [7:0] _sourceC_req_out_bankSel_T_10 = 8'hFF; // @[BankedStore.scala:136:71, :137:72]
wire [63:0] sourceC_req_data_0 = 64'h0; // @[BankedStore.scala:128:19]
wire [63:0] sourceC_req_data_1 = 64'h0; // @[BankedStore.scala:128:19]
wire [63:0] sourceC_req_data_2 = 64'h0; // @[BankedStore.scala:128:19]
wire [63:0] sourceC_req_data_3 = 64'h0; // @[BankedStore.scala:128:19]
wire [63:0] sourceC_req_data_4 = 64'h0; // @[BankedStore.scala:128:19]
wire [63:0] sourceC_req_data_5 = 64'h0; // @[BankedStore.scala:128:19]
wire [63:0] sourceC_req_data_6 = 64'h0; // @[BankedStore.scala:128:19]
wire [63:0] sourceC_req_data_7 = 64'h0; // @[BankedStore.scala:128:19]
wire [63:0] sourceD_rreq_data_0 = 64'h0; // @[BankedStore.scala:128:19]
wire [63:0] sourceD_rreq_data_1 = 64'h0; // @[BankedStore.scala:128:19]
wire [63:0] sourceD_rreq_data_2 = 64'h0; // @[BankedStore.scala:128:19]
wire [63:0] sourceD_rreq_data_3 = 64'h0; // @[BankedStore.scala:128:19]
wire [63:0] sourceD_rreq_data_4 = 64'h0; // @[BankedStore.scala:128:19]
wire [63:0] sourceD_rreq_data_5 = 64'h0; // @[BankedStore.scala:128:19]
wire [63:0] sourceD_rreq_data_6 = 64'h0; // @[BankedStore.scala:128:19]
wire [63:0] sourceD_rreq_data_7 = 64'h0; // @[BankedStore.scala:128:19]
wire io_sourceC_adr_bits_noop = 1'h0; // @[BankedStore.scala:59:7]
wire io_sourceD_radr_bits_noop = 1'h0; // @[BankedStore.scala:59:7]
wire io_sourceD_wadr_bits_noop = 1'h0; // @[BankedStore.scala:59:7]
wire _sinkC_req_ready_T_2 = 1'h0; // @[BankedStore.scala:131:101]
wire _sinkC_req_ready_T_6 = 1'h0; // @[BankedStore.scala:131:101]
wire _sinkC_req_ready_T_10 = 1'h0; // @[BankedStore.scala:131:101]
wire _sinkC_req_ready_T_14 = 1'h0; // @[BankedStore.scala:131:101]
wire sourceC_req_wen = 1'h0; // @[BankedStore.scala:128:19]
wire sourceD_rreq_wen = 1'h0; // @[BankedStore.scala:128:19]
wire io_sinkD_adr_bits_mask = 1'h1; // @[BankedStore.scala:59:7]
wire io_sourceC_adr_bits_mask = 1'h1; // @[BankedStore.scala:59:7]
wire sinkC_req_wen = 1'h1; // @[BankedStore.scala:128:19]
wire _sinkC_req_ready_T_3 = 1'h1; // @[BankedStore.scala:131:58]
wire _sinkC_req_ready_T_7 = 1'h1; // @[BankedStore.scala:131:58]
wire _sinkC_req_ready_T_11 = 1'h1; // @[BankedStore.scala:131:58]
wire _sinkC_req_ready_T_15 = 1'h1; // @[BankedStore.scala:131:58]
wire _sinkC_req_io_sinkC_adr_ready_T_2; // @[BankedStore.scala:132:21]
wire _sinkC_req_out_bankEn_T = 1'h1; // @[BankedStore.scala:137:72]
wire _sinkC_req_out_bankEn_T_1 = 1'h1; // @[BankedStore.scala:137:72]
wire _sinkC_req_out_bankEn_T_2 = 1'h1; // @[BankedStore.scala:137:72]
wire _sinkC_req_out_bankEn_T_3 = 1'h1; // @[BankedStore.scala:137:72]
wire sinkD_req_wen = 1'h1; // @[BankedStore.scala:128:19]
wire _sinkD_req_out_bankSel_T_9 = 1'h1; // @[BankedStore.scala:136:71]
wire _sourceC_req_out_bankSel_T_9 = 1'h1; // @[BankedStore.scala:136:71]
wire sourceD_wreq_wen = 1'h1; // @[BankedStore.scala:128:19]
wire _sinkD_req_io_sinkD_adr_ready_T_2; // @[BankedStore.scala:132:21]
wire [63:0] sinkD_req_words_0 = io_sinkD_dat_data_0; // @[BankedStore.scala:59:7, :123:19]
wire _sourceC_req_io_sourceC_adr_ready_T_2; // @[BankedStore.scala:132:21]
wire [63:0] decodeC_0; // @[BankedStore.scala:180:85]
wire _sourceD_rreq_io_sourceD_radr_ready_T_2; // @[BankedStore.scala:132:21]
wire [127:0] _io_sourceD_rdat_data_T; // @[BankedStore.scala:189:30]
wire _sourceD_wreq_io_sourceD_wadr_ready_T_2; // @[BankedStore.scala:132:21]
wire io_sinkC_adr_ready_0; // @[BankedStore.scala:59:7]
wire io_sinkD_adr_ready_0; // @[BankedStore.scala:59:7]
wire io_sourceC_adr_ready_0; // @[BankedStore.scala:59:7]
wire [63:0] io_sourceC_dat_data_0; // @[BankedStore.scala:59:7]
wire io_sourceD_radr_ready_0; // @[BankedStore.scala:59:7]
wire [127:0] io_sourceD_rdat_data_0; // @[BankedStore.scala:59:7]
wire io_sourceD_wadr_ready_0; // @[BankedStore.scala:59:7]
wire [14:0] regout_idx; // @[Mux.scala:50:70]
wire _regout_T; // @[BankedStore.scala:171:15]
wire [14:0] _regout_WIRE; // @[BankedStore.scala:172:21]
wire _regout_T_2; // @[BankedStore.scala:172:32]
wire [14:0] regout_idx_1; // @[Mux.scala:50:70]
wire _regout_T_5; // @[BankedStore.scala:171:15]
wire [14:0] _regout_WIRE_1; // @[BankedStore.scala:172:21]
wire _regout_T_7; // @[BankedStore.scala:172:32]
wire [14:0] regout_idx_2; // @[Mux.scala:50:70]
wire _regout_T_10; // @[BankedStore.scala:171:15]
wire [14:0] _regout_WIRE_2; // @[BankedStore.scala:172:21]
wire _regout_T_12; // @[BankedStore.scala:172:32]
wire [14:0] regout_idx_3; // @[Mux.scala:50:70]
wire _regout_T_15; // @[BankedStore.scala:171:15]
wire [14:0] _regout_WIRE_3; // @[BankedStore.scala:172:21]
wire _regout_T_17; // @[BankedStore.scala:172:32]
wire [14:0] regout_idx_4; // @[Mux.scala:50:70]
wire _regout_T_20; // @[BankedStore.scala:171:15]
wire [14:0] _regout_WIRE_4; // @[BankedStore.scala:172:21]
wire _regout_T_22; // @[BankedStore.scala:172:32]
wire [14:0] regout_idx_5; // @[Mux.scala:50:70]
wire _regout_T_25; // @[BankedStore.scala:171:15]
wire [14:0] _regout_WIRE_5; // @[BankedStore.scala:172:21]
wire _regout_T_27; // @[BankedStore.scala:172:32]
wire [14:0] regout_idx_6; // @[Mux.scala:50:70]
wire _regout_T_30; // @[BankedStore.scala:171:15]
wire [14:0] _regout_WIRE_6; // @[BankedStore.scala:172:21]
wire _regout_T_32; // @[BankedStore.scala:172:32]
wire [14:0] regout_idx_7; // @[Mux.scala:50:70]
wire _regout_T_35; // @[BankedStore.scala:171:15]
wire [14:0] _regout_WIRE_7; // @[BankedStore.scala:172:21]
wire _regout_T_37; // @[BankedStore.scala:172:32]
wire [63:0] sinkC_req_words_0 = io_sinkC_dat_data_0[63:0]; // @[BankedStore.scala:59:7, :123:19]
wire [63:0] sinkC_req_data_0 = sinkC_req_words_0; // @[BankedStore.scala:123:19, :128:19]
wire [63:0] sinkC_req_data_2 = sinkC_req_words_0; // @[BankedStore.scala:123:19, :128:19]
wire [63:0] sinkC_req_data_4 = sinkC_req_words_0; // @[BankedStore.scala:123:19, :128:19]
wire [63:0] sinkC_req_data_6 = sinkC_req_words_0; // @[BankedStore.scala:123:19, :128:19]
wire [63:0] sinkC_req_words_1 = io_sinkC_dat_data_0[127:64]; // @[BankedStore.scala:59:7, :123:19]
wire [63:0] sinkC_req_data_1 = sinkC_req_words_1; // @[BankedStore.scala:123:19, :128:19]
wire [63:0] sinkC_req_data_3 = sinkC_req_words_1; // @[BankedStore.scala:123:19, :128:19]
wire [63:0] sinkC_req_data_5 = sinkC_req_words_1; // @[BankedStore.scala:123:19, :128:19]
wire [63:0] sinkC_req_data_7 = sinkC_req_words_1; // @[BankedStore.scala:123:19, :128:19]
wire [14:0] sinkC_req_a_hi = {io_sinkC_adr_bits_way_0, io_sinkC_adr_bits_set_0}; // @[BankedStore.scala:59:7, :126:91]
wire [16:0] sinkC_req_a = {sinkC_req_a_hi, io_sinkC_adr_bits_beat_0}; // @[BankedStore.scala:59:7, :126:91]
wire [14:0] _sinkC_req_out_index_T; // @[BankedStore.scala:135:23]
wire [7:0] _sinkC_req_out_bankSel_T_12; // @[BankedStore.scala:136:24]
wire [7:0] _sinkC_req_out_bankEn_T_9 = sinkC_req_bankSel; // @[BankedStore.scala:128:19, :137:55]
wire [7:0] sourceC_req_bankSum = sinkC_req_bankSel; // @[BankedStore.scala:128:19]
wire [7:0] _sinkC_req_out_bankEn_T_10; // @[BankedStore.scala:137:24]
wire [14:0] sinkC_req_index; // @[BankedStore.scala:128:19]
wire [7:0] sinkC_req_bankEn; // @[BankedStore.scala:128:19]
wire [1:0] _sinkC_req_select_T = sinkC_req_a[1:0]; // @[BankedStore.scala:126:91, :130:28]
wire [1:0] _sinkC_req_io_sinkC_adr_ready_T = sinkC_req_a[1:0]; // @[BankedStore.scala:126:91, :130:28, :132:23]
wire [1:0] sinkC_req_select_shiftAmount = _sinkC_req_select_T; // @[OneHot.scala:64:49]
wire [3:0] _sinkC_req_select_T_1 = 4'h1 << sinkC_req_select_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [3:0] sinkC_req_select = _sinkC_req_select_T_1; // @[OneHot.scala:65:{12,27}]
wire [3:0] _sinkC_req_io_sinkC_adr_ready_T_1 = 4'hF >> _sinkC_req_io_sinkC_adr_ready_T; // @[BankedStore.scala:131:21, :132:{21,23}, :137:72]
assign _sinkC_req_io_sinkC_adr_ready_T_2 = _sinkC_req_io_sinkC_adr_ready_T_1[0]; // @[BankedStore.scala:132:21]
assign io_sinkC_adr_ready_0 = _sinkC_req_io_sinkC_adr_ready_T_2; // @[BankedStore.scala:59:7, :132:21]
assign _sinkC_req_out_index_T = sinkC_req_a[16:2]; // @[BankedStore.scala:126:91, :135:23]
assign sinkC_req_index = _sinkC_req_out_index_T; // @[BankedStore.scala:128:19, :135:23]
wire _sinkC_req_out_bankSel_T = sinkC_req_select[0]; // @[OneHot.scala:65:27]
wire _sinkC_req_out_bankSel_T_1 = sinkC_req_select[1]; // @[OneHot.scala:65:27]
wire _sinkC_req_out_bankSel_T_2 = sinkC_req_select[2]; // @[OneHot.scala:65:27]
wire _sinkC_req_out_bankSel_T_3 = sinkC_req_select[3]; // @[OneHot.scala:65:27]
wire [1:0] _sinkC_req_out_bankSel_T_4 = {2{_sinkC_req_out_bankSel_T}}; // @[BankedStore.scala:136:49]
wire [1:0] _sinkC_req_out_bankSel_T_5 = {2{_sinkC_req_out_bankSel_T_1}}; // @[BankedStore.scala:136:49]
wire [1:0] _sinkC_req_out_bankSel_T_6 = {2{_sinkC_req_out_bankSel_T_2}}; // @[BankedStore.scala:136:49]
wire [1:0] _sinkC_req_out_bankSel_T_7 = {2{_sinkC_req_out_bankSel_T_3}}; // @[BankedStore.scala:136:49]
wire [3:0] sinkC_req_out_bankSel_lo = {_sinkC_req_out_bankSel_T_5, _sinkC_req_out_bankSel_T_4}; // @[BankedStore.scala:136:49]
wire [3:0] sinkC_req_out_bankSel_hi = {_sinkC_req_out_bankSel_T_7, _sinkC_req_out_bankSel_T_6}; // @[BankedStore.scala:136:49]
wire [7:0] _sinkC_req_out_bankSel_T_8 = {sinkC_req_out_bankSel_hi, sinkC_req_out_bankSel_lo}; // @[BankedStore.scala:136:49]
wire [3:0] _sinkC_req_out_bankSel_T_9 = {2{io_sinkC_adr_bits_mask_0}}; // @[BankedStore.scala:59:7, :136:71]
wire [7:0] _sinkC_req_out_bankSel_T_10 = {2{_sinkC_req_out_bankSel_T_9}}; // @[BankedStore.scala:136:71]
wire [7:0] _sinkC_req_out_bankSel_T_11 = _sinkC_req_out_bankSel_T_8 & _sinkC_req_out_bankSel_T_10; // @[BankedStore.scala:136:{49,65,71}]
assign _sinkC_req_out_bankSel_T_12 = io_sinkC_adr_valid_0 ? _sinkC_req_out_bankSel_T_11 : 8'h0; // @[BankedStore.scala:59:7, :136:{24,65}]
assign sinkC_req_bankSel = _sinkC_req_out_bankSel_T_12; // @[BankedStore.scala:128:19, :136:24]
assign _sinkC_req_out_bankEn_T_10 = io_sinkC_adr_bits_noop_0 ? 8'h0 : _sinkC_req_out_bankEn_T_9; // @[BankedStore.scala:59:7, :137:{24,55}]
assign sinkC_req_bankEn = _sinkC_req_out_bankEn_T_10; // @[BankedStore.scala:128:19, :137:24]
wire [63:0] sinkD_req_data_0 = sinkD_req_words_0; // @[BankedStore.scala:123:19, :128:19]
wire [63:0] sinkD_req_data_1 = sinkD_req_words_0; // @[BankedStore.scala:123:19, :128:19]
wire [63:0] sinkD_req_data_2 = sinkD_req_words_0; // @[BankedStore.scala:123:19, :128:19]
wire [63:0] sinkD_req_data_3 = sinkD_req_words_0; // @[BankedStore.scala:123:19, :128:19]
wire [63:0] sinkD_req_data_4 = sinkD_req_words_0; // @[BankedStore.scala:123:19, :128:19]
wire [63:0] sinkD_req_data_5 = sinkD_req_words_0; // @[BankedStore.scala:123:19, :128:19]
wire [63:0] sinkD_req_data_6 = sinkD_req_words_0; // @[BankedStore.scala:123:19, :128:19]
wire [63:0] sinkD_req_data_7 = sinkD_req_words_0; // @[BankedStore.scala:123:19, :128:19]
wire [14:0] sinkD_req_a_hi = {io_sinkD_adr_bits_way_0, io_sinkD_adr_bits_set_0}; // @[BankedStore.scala:59:7, :126:91]
wire [17:0] sinkD_req_a = {sinkD_req_a_hi, io_sinkD_adr_bits_beat_0}; // @[BankedStore.scala:59:7, :126:91]
wire [14:0] _sinkD_req_out_index_T; // @[BankedStore.scala:135:23]
wire [7:0] _sinkD_req_out_bankSel_T_12; // @[BankedStore.scala:136:24]
wire [7:0] _sinkD_req_out_bankEn_T_10; // @[BankedStore.scala:137:24]
wire [14:0] sinkD_req_index; // @[BankedStore.scala:128:19]
wire [7:0] sinkD_req_bankSel; // @[BankedStore.scala:128:19]
wire [7:0] sinkD_req_bankSum; // @[BankedStore.scala:128:19]
wire [7:0] sinkD_req_bankEn; // @[BankedStore.scala:128:19]
wire [2:0] _sinkD_req_select_T = sinkD_req_a[2:0]; // @[BankedStore.scala:126:91, :130:28]
wire [2:0] _sinkD_req_io_sinkD_adr_ready_T = sinkD_req_a[2:0]; // @[BankedStore.scala:126:91, :130:28, :132:23]
wire [2:0] sinkD_req_select_shiftAmount = _sinkD_req_select_T; // @[OneHot.scala:64:49]
wire [7:0] _sinkD_req_select_T_1 = 8'h1 << sinkD_req_select_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [7:0] sinkD_req_select = _sinkD_req_select_T_1; // @[OneHot.scala:65:{12,27}]
wire _sinkD_req_ready_T = sinkD_req_bankSum[0]; // @[BankedStore.scala:128:19, :131:71]
wire _sinkD_req_ready_T_1 = _sinkD_req_ready_T; // @[BankedStore.scala:131:{71,96}]
wire _sinkD_req_ready_T_2 = _sinkD_req_ready_T_1; // @[BankedStore.scala:131:{96,101}]
wire _sinkD_req_ready_T_3 = ~_sinkD_req_ready_T_2; // @[BankedStore.scala:131:{58,101}]
wire _sinkD_req_ready_T_4 = sinkD_req_bankSum[1]; // @[BankedStore.scala:128:19, :131:71]
wire _sinkD_req_ready_T_5 = _sinkD_req_ready_T_4; // @[BankedStore.scala:131:{71,96}]
wire _sinkD_req_ready_T_6 = _sinkD_req_ready_T_5; // @[BankedStore.scala:131:{96,101}]
wire _sinkD_req_ready_T_7 = ~_sinkD_req_ready_T_6; // @[BankedStore.scala:131:{58,101}]
wire _sinkD_req_ready_T_8 = sinkD_req_bankSum[2]; // @[BankedStore.scala:128:19, :131:71]
wire _sinkD_req_ready_T_9 = _sinkD_req_ready_T_8; // @[BankedStore.scala:131:{71,96}]
wire _sinkD_req_ready_T_10 = _sinkD_req_ready_T_9; // @[BankedStore.scala:131:{96,101}]
wire _sinkD_req_ready_T_11 = ~_sinkD_req_ready_T_10; // @[BankedStore.scala:131:{58,101}]
wire _sinkD_req_ready_T_12 = sinkD_req_bankSum[3]; // @[BankedStore.scala:128:19, :131:71]
wire _sinkD_req_ready_T_13 = _sinkD_req_ready_T_12; // @[BankedStore.scala:131:{71,96}]
wire _sinkD_req_ready_T_14 = _sinkD_req_ready_T_13; // @[BankedStore.scala:131:{96,101}]
wire _sinkD_req_ready_T_15 = ~_sinkD_req_ready_T_14; // @[BankedStore.scala:131:{58,101}]
wire _sinkD_req_ready_T_16 = sinkD_req_bankSum[4]; // @[BankedStore.scala:128:19, :131:71]
wire _sinkD_req_ready_T_17 = _sinkD_req_ready_T_16; // @[BankedStore.scala:131:{71,96}]
wire _sinkD_req_ready_T_18 = _sinkD_req_ready_T_17; // @[BankedStore.scala:131:{96,101}]
wire _sinkD_req_ready_T_19 = ~_sinkD_req_ready_T_18; // @[BankedStore.scala:131:{58,101}]
wire _sinkD_req_ready_T_20 = sinkD_req_bankSum[5]; // @[BankedStore.scala:128:19, :131:71]
wire _sinkD_req_ready_T_21 = _sinkD_req_ready_T_20; // @[BankedStore.scala:131:{71,96}]
wire _sinkD_req_ready_T_22 = _sinkD_req_ready_T_21; // @[BankedStore.scala:131:{96,101}]
wire _sinkD_req_ready_T_23 = ~_sinkD_req_ready_T_22; // @[BankedStore.scala:131:{58,101}]
wire _sinkD_req_ready_T_24 = sinkD_req_bankSum[6]; // @[BankedStore.scala:128:19, :131:71]
wire _sinkD_req_ready_T_25 = _sinkD_req_ready_T_24; // @[BankedStore.scala:131:{71,96}]
wire _sinkD_req_ready_T_26 = _sinkD_req_ready_T_25; // @[BankedStore.scala:131:{96,101}]
wire _sinkD_req_ready_T_27 = ~_sinkD_req_ready_T_26; // @[BankedStore.scala:131:{58,101}]
wire _sinkD_req_ready_T_28 = sinkD_req_bankSum[7]; // @[BankedStore.scala:128:19, :131:71]
wire _sinkD_req_ready_T_29 = _sinkD_req_ready_T_28; // @[BankedStore.scala:131:{71,96}]
wire _sinkD_req_ready_T_30 = _sinkD_req_ready_T_29; // @[BankedStore.scala:131:{96,101}]
wire _sinkD_req_ready_T_31 = ~_sinkD_req_ready_T_30; // @[BankedStore.scala:131:{58,101}]
wire [1:0] sinkD_req_ready_lo_lo = {_sinkD_req_ready_T_7, _sinkD_req_ready_T_3}; // @[BankedStore.scala:131:{21,58}]
wire [1:0] sinkD_req_ready_lo_hi = {_sinkD_req_ready_T_15, _sinkD_req_ready_T_11}; // @[BankedStore.scala:131:{21,58}]
wire [3:0] sinkD_req_ready_lo = {sinkD_req_ready_lo_hi, sinkD_req_ready_lo_lo}; // @[BankedStore.scala:131:21]
wire [1:0] sinkD_req_ready_hi_lo = {_sinkD_req_ready_T_23, _sinkD_req_ready_T_19}; // @[BankedStore.scala:131:{21,58}]
wire [1:0] sinkD_req_ready_hi_hi = {_sinkD_req_ready_T_31, _sinkD_req_ready_T_27}; // @[BankedStore.scala:131:{21,58}]
wire [3:0] sinkD_req_ready_hi = {sinkD_req_ready_hi_hi, sinkD_req_ready_hi_lo}; // @[BankedStore.scala:131:21]
wire [7:0] sinkD_req_ready = {sinkD_req_ready_hi, sinkD_req_ready_lo}; // @[BankedStore.scala:131:21]
wire [7:0] _sinkD_req_io_sinkD_adr_ready_T_1 = sinkD_req_ready >> _sinkD_req_io_sinkD_adr_ready_T; // @[BankedStore.scala:131:21, :132:{21,23}]
assign _sinkD_req_io_sinkD_adr_ready_T_2 = _sinkD_req_io_sinkD_adr_ready_T_1[0]; // @[BankedStore.scala:132:21]
assign io_sinkD_adr_ready_0 = _sinkD_req_io_sinkD_adr_ready_T_2; // @[BankedStore.scala:59:7, :132:21]
assign _sinkD_req_out_index_T = sinkD_req_a[17:3]; // @[BankedStore.scala:126:91, :135:23]
assign sinkD_req_index = _sinkD_req_out_index_T; // @[BankedStore.scala:128:19, :135:23]
wire _sinkD_req_out_bankSel_T = sinkD_req_select[0]; // @[OneHot.scala:65:27]
wire _sinkD_req_out_bankSel_T_1 = sinkD_req_select[1]; // @[OneHot.scala:65:27]
wire _sinkD_req_out_bankSel_T_2 = sinkD_req_select[2]; // @[OneHot.scala:65:27]
wire _sinkD_req_out_bankSel_T_3 = sinkD_req_select[3]; // @[OneHot.scala:65:27]
wire _sinkD_req_out_bankSel_T_4 = sinkD_req_select[4]; // @[OneHot.scala:65:27]
wire _sinkD_req_out_bankSel_T_5 = sinkD_req_select[5]; // @[OneHot.scala:65:27]
wire _sinkD_req_out_bankSel_T_6 = sinkD_req_select[6]; // @[OneHot.scala:65:27]
wire _sinkD_req_out_bankSel_T_7 = sinkD_req_select[7]; // @[OneHot.scala:65:27]
wire [1:0] sinkD_req_out_bankSel_lo_lo = {_sinkD_req_out_bankSel_T_1, _sinkD_req_out_bankSel_T}; // @[BankedStore.scala:136:49]
wire [1:0] sinkD_req_out_bankSel_lo_hi = {_sinkD_req_out_bankSel_T_3, _sinkD_req_out_bankSel_T_2}; // @[BankedStore.scala:136:49]
wire [3:0] sinkD_req_out_bankSel_lo = {sinkD_req_out_bankSel_lo_hi, sinkD_req_out_bankSel_lo_lo}; // @[BankedStore.scala:136:49]
wire [1:0] sinkD_req_out_bankSel_hi_lo = {_sinkD_req_out_bankSel_T_5, _sinkD_req_out_bankSel_T_4}; // @[BankedStore.scala:136:49]
wire [1:0] sinkD_req_out_bankSel_hi_hi = {_sinkD_req_out_bankSel_T_7, _sinkD_req_out_bankSel_T_6}; // @[BankedStore.scala:136:49]
wire [3:0] sinkD_req_out_bankSel_hi = {sinkD_req_out_bankSel_hi_hi, sinkD_req_out_bankSel_hi_lo}; // @[BankedStore.scala:136:49]
wire [7:0] _sinkD_req_out_bankSel_T_8 = {sinkD_req_out_bankSel_hi, sinkD_req_out_bankSel_lo}; // @[BankedStore.scala:136:49]
wire [7:0] _sinkD_req_out_bankSel_T_11 = _sinkD_req_out_bankSel_T_8; // @[BankedStore.scala:136:{49,65}]
assign _sinkD_req_out_bankSel_T_12 = io_sinkD_adr_valid_0 ? _sinkD_req_out_bankSel_T_11 : 8'h0; // @[BankedStore.scala:59:7, :136:{24,65}]
assign sinkD_req_bankSel = _sinkD_req_out_bankSel_T_12; // @[BankedStore.scala:128:19, :136:24]
wire _sinkD_req_out_bankEn_T = sinkD_req_ready[0]; // @[BankedStore.scala:131:21, :137:72]
wire _sinkD_req_out_bankEn_T_1 = sinkD_req_ready[1]; // @[BankedStore.scala:131:21, :137:72]
wire _sinkD_req_out_bankEn_T_2 = sinkD_req_ready[2]; // @[BankedStore.scala:131:21, :137:72]
wire _sinkD_req_out_bankEn_T_3 = sinkD_req_ready[3]; // @[BankedStore.scala:131:21, :137:72]
wire _sinkD_req_out_bankEn_T_4 = sinkD_req_ready[4]; // @[BankedStore.scala:131:21, :137:72]
wire _sinkD_req_out_bankEn_T_5 = sinkD_req_ready[5]; // @[BankedStore.scala:131:21, :137:72]
wire _sinkD_req_out_bankEn_T_6 = sinkD_req_ready[6]; // @[BankedStore.scala:131:21, :137:72]
wire _sinkD_req_out_bankEn_T_7 = sinkD_req_ready[7]; // @[BankedStore.scala:131:21, :137:72]
wire [1:0] sinkD_req_out_bankEn_lo_lo = {_sinkD_req_out_bankEn_T_1, _sinkD_req_out_bankEn_T}; // @[BankedStore.scala:137:72]
wire [1:0] sinkD_req_out_bankEn_lo_hi = {_sinkD_req_out_bankEn_T_3, _sinkD_req_out_bankEn_T_2}; // @[BankedStore.scala:137:72]
wire [3:0] sinkD_req_out_bankEn_lo = {sinkD_req_out_bankEn_lo_hi, sinkD_req_out_bankEn_lo_lo}; // @[BankedStore.scala:137:72]
wire [1:0] sinkD_req_out_bankEn_hi_lo = {_sinkD_req_out_bankEn_T_5, _sinkD_req_out_bankEn_T_4}; // @[BankedStore.scala:137:72]
wire [1:0] sinkD_req_out_bankEn_hi_hi = {_sinkD_req_out_bankEn_T_7, _sinkD_req_out_bankEn_T_6}; // @[BankedStore.scala:137:72]
wire [3:0] sinkD_req_out_bankEn_hi = {sinkD_req_out_bankEn_hi_hi, sinkD_req_out_bankEn_hi_lo}; // @[BankedStore.scala:137:72]
wire [7:0] _sinkD_req_out_bankEn_T_8 = {sinkD_req_out_bankEn_hi, sinkD_req_out_bankEn_lo}; // @[BankedStore.scala:137:72]
wire [7:0] _sinkD_req_out_bankEn_T_9 = sinkD_req_bankSel & _sinkD_req_out_bankEn_T_8; // @[BankedStore.scala:128:19, :137:{55,72}]
assign _sinkD_req_out_bankEn_T_10 = io_sinkD_adr_bits_noop_0 ? 8'h0 : _sinkD_req_out_bankEn_T_9; // @[BankedStore.scala:59:7, :137:{24,55}]
assign sinkD_req_bankEn = _sinkD_req_out_bankEn_T_10; // @[BankedStore.scala:128:19, :137:24]
wire [14:0] sourceC_req_a_hi = {io_sourceC_adr_bits_way_0, io_sourceC_adr_bits_set_0}; // @[BankedStore.scala:59:7, :126:91]
wire [17:0] sourceC_req_a = {sourceC_req_a_hi, io_sourceC_adr_bits_beat_0}; // @[BankedStore.scala:59:7, :126:91]
wire [14:0] _sourceC_req_out_index_T; // @[BankedStore.scala:135:23]
wire [7:0] _sourceC_req_out_bankSel_T_12; // @[BankedStore.scala:136:24]
wire [7:0] _sourceC_req_out_bankEn_T_10; // @[BankedStore.scala:137:24]
wire [14:0] sourceC_req_index; // @[BankedStore.scala:128:19]
wire [7:0] sourceC_req_bankSel; // @[BankedStore.scala:128:19]
wire [7:0] sourceC_req_bankEn; // @[BankedStore.scala:128:19]
wire [2:0] _sourceC_req_select_T = sourceC_req_a[2:0]; // @[BankedStore.scala:126:91, :130:28]
wire [2:0] _sourceC_req_io_sourceC_adr_ready_T = sourceC_req_a[2:0]; // @[BankedStore.scala:126:91, :130:28, :132:23]
wire [2:0] sourceC_req_select_shiftAmount = _sourceC_req_select_T; // @[OneHot.scala:64:49]
wire [7:0] _sourceC_req_select_T_1 = 8'h1 << sourceC_req_select_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [7:0] sourceC_req_select = _sourceC_req_select_T_1; // @[OneHot.scala:65:{12,27}]
wire _sourceC_req_ready_T = sourceC_req_bankSum[0]; // @[BankedStore.scala:128:19, :131:71]
wire _sourceC_req_ready_T_1 = _sourceC_req_ready_T; // @[BankedStore.scala:131:{71,96}]
wire _sourceC_req_ready_T_2 = _sourceC_req_ready_T_1; // @[BankedStore.scala:131:{96,101}]
wire _sourceC_req_ready_T_3 = ~_sourceC_req_ready_T_2; // @[BankedStore.scala:131:{58,101}]
wire _sourceC_req_ready_T_4 = sourceC_req_bankSum[1]; // @[BankedStore.scala:128:19, :131:71]
wire _sourceC_req_ready_T_5 = _sourceC_req_ready_T_4; // @[BankedStore.scala:131:{71,96}]
wire _sourceC_req_ready_T_6 = _sourceC_req_ready_T_5; // @[BankedStore.scala:131:{96,101}]
wire _sourceC_req_ready_T_7 = ~_sourceC_req_ready_T_6; // @[BankedStore.scala:131:{58,101}]
wire _sourceC_req_ready_T_8 = sourceC_req_bankSum[2]; // @[BankedStore.scala:128:19, :131:71]
wire _sourceC_req_ready_T_9 = _sourceC_req_ready_T_8; // @[BankedStore.scala:131:{71,96}]
wire _sourceC_req_ready_T_10 = _sourceC_req_ready_T_9; // @[BankedStore.scala:131:{96,101}]
wire _sourceC_req_ready_T_11 = ~_sourceC_req_ready_T_10; // @[BankedStore.scala:131:{58,101}]
wire _sourceC_req_ready_T_12 = sourceC_req_bankSum[3]; // @[BankedStore.scala:128:19, :131:71]
wire _sourceC_req_ready_T_13 = _sourceC_req_ready_T_12; // @[BankedStore.scala:131:{71,96}]
wire _sourceC_req_ready_T_14 = _sourceC_req_ready_T_13; // @[BankedStore.scala:131:{96,101}]
wire _sourceC_req_ready_T_15 = ~_sourceC_req_ready_T_14; // @[BankedStore.scala:131:{58,101}]
wire _sourceC_req_ready_T_16 = sourceC_req_bankSum[4]; // @[BankedStore.scala:128:19, :131:71]
wire _sourceC_req_ready_T_17 = _sourceC_req_ready_T_16; // @[BankedStore.scala:131:{71,96}]
wire _sourceC_req_ready_T_18 = _sourceC_req_ready_T_17; // @[BankedStore.scala:131:{96,101}]
wire _sourceC_req_ready_T_19 = ~_sourceC_req_ready_T_18; // @[BankedStore.scala:131:{58,101}]
wire _sourceC_req_ready_T_20 = sourceC_req_bankSum[5]; // @[BankedStore.scala:128:19, :131:71]
wire _sourceC_req_ready_T_21 = _sourceC_req_ready_T_20; // @[BankedStore.scala:131:{71,96}]
wire _sourceC_req_ready_T_22 = _sourceC_req_ready_T_21; // @[BankedStore.scala:131:{96,101}]
wire _sourceC_req_ready_T_23 = ~_sourceC_req_ready_T_22; // @[BankedStore.scala:131:{58,101}]
wire _sourceC_req_ready_T_24 = sourceC_req_bankSum[6]; // @[BankedStore.scala:128:19, :131:71]
wire _sourceC_req_ready_T_25 = _sourceC_req_ready_T_24; // @[BankedStore.scala:131:{71,96}]
wire _sourceC_req_ready_T_26 = _sourceC_req_ready_T_25; // @[BankedStore.scala:131:{96,101}]
wire _sourceC_req_ready_T_27 = ~_sourceC_req_ready_T_26; // @[BankedStore.scala:131:{58,101}]
wire _sourceC_req_ready_T_28 = sourceC_req_bankSum[7]; // @[BankedStore.scala:128:19, :131:71]
wire _sourceC_req_ready_T_29 = _sourceC_req_ready_T_28; // @[BankedStore.scala:131:{71,96}]
wire _sourceC_req_ready_T_30 = _sourceC_req_ready_T_29; // @[BankedStore.scala:131:{96,101}]
wire _sourceC_req_ready_T_31 = ~_sourceC_req_ready_T_30; // @[BankedStore.scala:131:{58,101}]
wire [1:0] sourceC_req_ready_lo_lo = {_sourceC_req_ready_T_7, _sourceC_req_ready_T_3}; // @[BankedStore.scala:131:{21,58}]
wire [1:0] sourceC_req_ready_lo_hi = {_sourceC_req_ready_T_15, _sourceC_req_ready_T_11}; // @[BankedStore.scala:131:{21,58}]
wire [3:0] sourceC_req_ready_lo = {sourceC_req_ready_lo_hi, sourceC_req_ready_lo_lo}; // @[BankedStore.scala:131:21]
wire [1:0] sourceC_req_ready_hi_lo = {_sourceC_req_ready_T_23, _sourceC_req_ready_T_19}; // @[BankedStore.scala:131:{21,58}]
wire [1:0] sourceC_req_ready_hi_hi = {_sourceC_req_ready_T_31, _sourceC_req_ready_T_27}; // @[BankedStore.scala:131:{21,58}]
wire [3:0] sourceC_req_ready_hi = {sourceC_req_ready_hi_hi, sourceC_req_ready_hi_lo}; // @[BankedStore.scala:131:21]
wire [7:0] sourceC_req_ready = {sourceC_req_ready_hi, sourceC_req_ready_lo}; // @[BankedStore.scala:131:21]
wire [7:0] _sourceC_req_io_sourceC_adr_ready_T_1 = sourceC_req_ready >> _sourceC_req_io_sourceC_adr_ready_T; // @[BankedStore.scala:131:21, :132:{21,23}]
assign _sourceC_req_io_sourceC_adr_ready_T_2 = _sourceC_req_io_sourceC_adr_ready_T_1[0]; // @[BankedStore.scala:132:21]
assign io_sourceC_adr_ready_0 = _sourceC_req_io_sourceC_adr_ready_T_2; // @[BankedStore.scala:59:7, :132:21]
assign _sourceC_req_out_index_T = sourceC_req_a[17:3]; // @[BankedStore.scala:126:91, :135:23]
assign sourceC_req_index = _sourceC_req_out_index_T; // @[BankedStore.scala:128:19, :135:23]
wire _sourceC_req_out_bankSel_T = sourceC_req_select[0]; // @[OneHot.scala:65:27]
wire _sourceC_req_out_bankSel_T_1 = sourceC_req_select[1]; // @[OneHot.scala:65:27]
wire _sourceC_req_out_bankSel_T_2 = sourceC_req_select[2]; // @[OneHot.scala:65:27]
wire _sourceC_req_out_bankSel_T_3 = sourceC_req_select[3]; // @[OneHot.scala:65:27]
wire _sourceC_req_out_bankSel_T_4 = sourceC_req_select[4]; // @[OneHot.scala:65:27]
wire _sourceC_req_out_bankSel_T_5 = sourceC_req_select[5]; // @[OneHot.scala:65:27]
wire _sourceC_req_out_bankSel_T_6 = sourceC_req_select[6]; // @[OneHot.scala:65:27]
wire _sourceC_req_out_bankSel_T_7 = sourceC_req_select[7]; // @[OneHot.scala:65:27]
wire [1:0] sourceC_req_out_bankSel_lo_lo = {_sourceC_req_out_bankSel_T_1, _sourceC_req_out_bankSel_T}; // @[BankedStore.scala:136:49]
wire [1:0] sourceC_req_out_bankSel_lo_hi = {_sourceC_req_out_bankSel_T_3, _sourceC_req_out_bankSel_T_2}; // @[BankedStore.scala:136:49]
wire [3:0] sourceC_req_out_bankSel_lo = {sourceC_req_out_bankSel_lo_hi, sourceC_req_out_bankSel_lo_lo}; // @[BankedStore.scala:136:49]
wire [1:0] sourceC_req_out_bankSel_hi_lo = {_sourceC_req_out_bankSel_T_5, _sourceC_req_out_bankSel_T_4}; // @[BankedStore.scala:136:49]
wire [1:0] sourceC_req_out_bankSel_hi_hi = {_sourceC_req_out_bankSel_T_7, _sourceC_req_out_bankSel_T_6}; // @[BankedStore.scala:136:49]
wire [3:0] sourceC_req_out_bankSel_hi = {sourceC_req_out_bankSel_hi_hi, sourceC_req_out_bankSel_hi_lo}; // @[BankedStore.scala:136:49]
wire [7:0] _sourceC_req_out_bankSel_T_8 = {sourceC_req_out_bankSel_hi, sourceC_req_out_bankSel_lo}; // @[BankedStore.scala:136:49]
wire [7:0] _sourceC_req_out_bankSel_T_11 = _sourceC_req_out_bankSel_T_8; // @[BankedStore.scala:136:{49,65}]
assign _sourceC_req_out_bankSel_T_12 = io_sourceC_adr_valid_0 ? _sourceC_req_out_bankSel_T_11 : 8'h0; // @[BankedStore.scala:59:7, :136:{24,65}]
assign sourceC_req_bankSel = _sourceC_req_out_bankSel_T_12; // @[BankedStore.scala:128:19, :136:24]
wire _sourceC_req_out_bankEn_T = sourceC_req_ready[0]; // @[BankedStore.scala:131:21, :137:72]
wire _sourceC_req_out_bankEn_T_1 = sourceC_req_ready[1]; // @[BankedStore.scala:131:21, :137:72]
wire _sourceC_req_out_bankEn_T_2 = sourceC_req_ready[2]; // @[BankedStore.scala:131:21, :137:72]
wire _sourceC_req_out_bankEn_T_3 = sourceC_req_ready[3]; // @[BankedStore.scala:131:21, :137:72]
wire _sourceC_req_out_bankEn_T_4 = sourceC_req_ready[4]; // @[BankedStore.scala:131:21, :137:72]
wire _sourceC_req_out_bankEn_T_5 = sourceC_req_ready[5]; // @[BankedStore.scala:131:21, :137:72]
wire _sourceC_req_out_bankEn_T_6 = sourceC_req_ready[6]; // @[BankedStore.scala:131:21, :137:72]
wire _sourceC_req_out_bankEn_T_7 = sourceC_req_ready[7]; // @[BankedStore.scala:131:21, :137:72]
wire [1:0] sourceC_req_out_bankEn_lo_lo = {_sourceC_req_out_bankEn_T_1, _sourceC_req_out_bankEn_T}; // @[BankedStore.scala:137:72]
wire [1:0] sourceC_req_out_bankEn_lo_hi = {_sourceC_req_out_bankEn_T_3, _sourceC_req_out_bankEn_T_2}; // @[BankedStore.scala:137:72]
wire [3:0] sourceC_req_out_bankEn_lo = {sourceC_req_out_bankEn_lo_hi, sourceC_req_out_bankEn_lo_lo}; // @[BankedStore.scala:137:72]
wire [1:0] sourceC_req_out_bankEn_hi_lo = {_sourceC_req_out_bankEn_T_5, _sourceC_req_out_bankEn_T_4}; // @[BankedStore.scala:137:72]
wire [1:0] sourceC_req_out_bankEn_hi_hi = {_sourceC_req_out_bankEn_T_7, _sourceC_req_out_bankEn_T_6}; // @[BankedStore.scala:137:72]
wire [3:0] sourceC_req_out_bankEn_hi = {sourceC_req_out_bankEn_hi_hi, sourceC_req_out_bankEn_hi_lo}; // @[BankedStore.scala:137:72]
wire [7:0] _sourceC_req_out_bankEn_T_8 = {sourceC_req_out_bankEn_hi, sourceC_req_out_bankEn_lo}; // @[BankedStore.scala:137:72]
wire [7:0] _sourceC_req_out_bankEn_T_9 = sourceC_req_bankSel & _sourceC_req_out_bankEn_T_8; // @[BankedStore.scala:128:19, :137:{55,72}]
assign _sourceC_req_out_bankEn_T_10 = _sourceC_req_out_bankEn_T_9; // @[BankedStore.scala:137:{24,55}]
assign sourceC_req_bankEn = _sourceC_req_out_bankEn_T_10; // @[BankedStore.scala:128:19, :137:24]
wire [14:0] sourceD_rreq_a_hi = {io_sourceD_radr_bits_way_0, io_sourceD_radr_bits_set_0}; // @[BankedStore.scala:59:7, :126:91]
wire [16:0] sourceD_rreq_a = {sourceD_rreq_a_hi, io_sourceD_radr_bits_beat_0}; // @[BankedStore.scala:59:7, :126:91]
wire [14:0] _sourceD_rreq_out_index_T; // @[BankedStore.scala:135:23]
wire [7:0] _sourceD_rreq_out_bankSel_T_12; // @[BankedStore.scala:136:24]
wire [7:0] _sourceD_rreq_out_bankEn_T_10; // @[BankedStore.scala:137:24]
wire [14:0] sourceD_rreq_index; // @[BankedStore.scala:128:19]
wire [7:0] sourceD_rreq_bankSel; // @[BankedStore.scala:128:19]
wire [7:0] sourceD_rreq_bankSum; // @[BankedStore.scala:128:19]
wire [7:0] sourceD_rreq_bankEn; // @[BankedStore.scala:128:19]
wire [1:0] _sourceD_rreq_select_T = sourceD_rreq_a[1:0]; // @[BankedStore.scala:126:91, :130:28]
wire [1:0] _sourceD_rreq_io_sourceD_radr_ready_T = sourceD_rreq_a[1:0]; // @[BankedStore.scala:126:91, :130:28, :132:23]
wire [1:0] sourceD_rreq_select_shiftAmount = _sourceD_rreq_select_T; // @[OneHot.scala:64:49]
wire [3:0] _sourceD_rreq_select_T_1 = 4'h1 << sourceD_rreq_select_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [3:0] sourceD_rreq_select = _sourceD_rreq_select_T_1; // @[OneHot.scala:65:{12,27}]
wire [1:0] _sourceD_rreq_ready_T = sourceD_rreq_bankSum[1:0]; // @[BankedStore.scala:128:19, :131:71]
wire [1:0] _sourceD_rreq_ready_T_1 = _sourceD_rreq_ready_T & io_sourceD_radr_bits_mask_0; // @[BankedStore.scala:59:7, :131:{71,96}]
wire _sourceD_rreq_ready_T_2 = |_sourceD_rreq_ready_T_1; // @[BankedStore.scala:131:{96,101}]
wire _sourceD_rreq_ready_T_3 = ~_sourceD_rreq_ready_T_2; // @[BankedStore.scala:131:{58,101}]
wire [1:0] _sourceD_rreq_ready_T_4 = sourceD_rreq_bankSum[3:2]; // @[BankedStore.scala:128:19, :131:71]
wire [1:0] _sourceD_rreq_ready_T_5 = _sourceD_rreq_ready_T_4 & io_sourceD_radr_bits_mask_0; // @[BankedStore.scala:59:7, :131:{71,96}]
wire _sourceD_rreq_ready_T_6 = |_sourceD_rreq_ready_T_5; // @[BankedStore.scala:131:{96,101}]
wire _sourceD_rreq_ready_T_7 = ~_sourceD_rreq_ready_T_6; // @[BankedStore.scala:131:{58,101}]
wire [1:0] _sourceD_rreq_ready_T_8 = sourceD_rreq_bankSum[5:4]; // @[BankedStore.scala:128:19, :131:71]
wire [1:0] _sourceD_rreq_ready_T_9 = _sourceD_rreq_ready_T_8 & io_sourceD_radr_bits_mask_0; // @[BankedStore.scala:59:7, :131:{71,96}]
wire _sourceD_rreq_ready_T_10 = |_sourceD_rreq_ready_T_9; // @[BankedStore.scala:131:{96,101}]
wire _sourceD_rreq_ready_T_11 = ~_sourceD_rreq_ready_T_10; // @[BankedStore.scala:131:{58,101}]
wire [1:0] _sourceD_rreq_ready_T_12 = sourceD_rreq_bankSum[7:6]; // @[BankedStore.scala:128:19, :131:71]
wire [1:0] _sourceD_rreq_ready_T_13 = _sourceD_rreq_ready_T_12 & io_sourceD_radr_bits_mask_0; // @[BankedStore.scala:59:7, :131:{71,96}]
wire _sourceD_rreq_ready_T_14 = |_sourceD_rreq_ready_T_13; // @[BankedStore.scala:131:{96,101}]
wire _sourceD_rreq_ready_T_15 = ~_sourceD_rreq_ready_T_14; // @[BankedStore.scala:131:{58,101}]
wire [1:0] sourceD_rreq_ready_lo = {_sourceD_rreq_ready_T_7, _sourceD_rreq_ready_T_3}; // @[BankedStore.scala:131:{21,58}]
wire [1:0] sourceD_rreq_ready_hi = {_sourceD_rreq_ready_T_15, _sourceD_rreq_ready_T_11}; // @[BankedStore.scala:131:{21,58}]
wire [3:0] sourceD_rreq_ready = {sourceD_rreq_ready_hi, sourceD_rreq_ready_lo}; // @[BankedStore.scala:131:21]
wire [3:0] _sourceD_rreq_io_sourceD_radr_ready_T_1 = sourceD_rreq_ready >> _sourceD_rreq_io_sourceD_radr_ready_T; // @[BankedStore.scala:131:21, :132:{21,23}]
assign _sourceD_rreq_io_sourceD_radr_ready_T_2 = _sourceD_rreq_io_sourceD_radr_ready_T_1[0]; // @[BankedStore.scala:132:21]
assign io_sourceD_radr_ready_0 = _sourceD_rreq_io_sourceD_radr_ready_T_2; // @[BankedStore.scala:59:7, :132:21]
assign _sourceD_rreq_out_index_T = sourceD_rreq_a[16:2]; // @[BankedStore.scala:126:91, :135:23]
assign sourceD_rreq_index = _sourceD_rreq_out_index_T; // @[BankedStore.scala:128:19, :135:23]
wire _sourceD_rreq_out_bankSel_T = sourceD_rreq_select[0]; // @[OneHot.scala:65:27]
wire _sourceD_rreq_out_bankSel_T_1 = sourceD_rreq_select[1]; // @[OneHot.scala:65:27]
wire _sourceD_rreq_out_bankSel_T_2 = sourceD_rreq_select[2]; // @[OneHot.scala:65:27]
wire _sourceD_rreq_out_bankSel_T_3 = sourceD_rreq_select[3]; // @[OneHot.scala:65:27]
wire [1:0] _sourceD_rreq_out_bankSel_T_4 = {2{_sourceD_rreq_out_bankSel_T}}; // @[BankedStore.scala:136:49]
wire [1:0] _sourceD_rreq_out_bankSel_T_5 = {2{_sourceD_rreq_out_bankSel_T_1}}; // @[BankedStore.scala:136:49]
wire [1:0] _sourceD_rreq_out_bankSel_T_6 = {2{_sourceD_rreq_out_bankSel_T_2}}; // @[BankedStore.scala:136:49]
wire [1:0] _sourceD_rreq_out_bankSel_T_7 = {2{_sourceD_rreq_out_bankSel_T_3}}; // @[BankedStore.scala:136:49]
wire [3:0] sourceD_rreq_out_bankSel_lo = {_sourceD_rreq_out_bankSel_T_5, _sourceD_rreq_out_bankSel_T_4}; // @[BankedStore.scala:136:49]
wire [3:0] sourceD_rreq_out_bankSel_hi = {_sourceD_rreq_out_bankSel_T_7, _sourceD_rreq_out_bankSel_T_6}; // @[BankedStore.scala:136:49]
wire [7:0] _sourceD_rreq_out_bankSel_T_8 = {sourceD_rreq_out_bankSel_hi, sourceD_rreq_out_bankSel_lo}; // @[BankedStore.scala:136:49]
wire [3:0] _sourceD_rreq_out_bankSel_T_9 = {2{io_sourceD_radr_bits_mask_0}}; // @[BankedStore.scala:59:7, :136:71]
wire [7:0] _sourceD_rreq_out_bankSel_T_10 = {2{_sourceD_rreq_out_bankSel_T_9}}; // @[BankedStore.scala:136:71]
wire [7:0] _sourceD_rreq_out_bankSel_T_11 = _sourceD_rreq_out_bankSel_T_8 & _sourceD_rreq_out_bankSel_T_10; // @[BankedStore.scala:136:{49,65,71}]
assign _sourceD_rreq_out_bankSel_T_12 = io_sourceD_radr_valid_0 ? _sourceD_rreq_out_bankSel_T_11 : 8'h0; // @[BankedStore.scala:59:7, :136:{24,65}]
assign sourceD_rreq_bankSel = _sourceD_rreq_out_bankSel_T_12; // @[BankedStore.scala:128:19, :136:24]
wire _sourceD_rreq_out_bankEn_T = sourceD_rreq_ready[0]; // @[BankedStore.scala:131:21, :137:72]
wire _sourceD_rreq_out_bankEn_T_1 = sourceD_rreq_ready[1]; // @[BankedStore.scala:131:21, :137:72]
wire _sourceD_rreq_out_bankEn_T_2 = sourceD_rreq_ready[2]; // @[BankedStore.scala:131:21, :137:72]
wire _sourceD_rreq_out_bankEn_T_3 = sourceD_rreq_ready[3]; // @[BankedStore.scala:131:21, :137:72]
wire [1:0] _sourceD_rreq_out_bankEn_T_4 = {2{_sourceD_rreq_out_bankEn_T}}; // @[BankedStore.scala:137:72]
wire [1:0] _sourceD_rreq_out_bankEn_T_5 = {2{_sourceD_rreq_out_bankEn_T_1}}; // @[BankedStore.scala:137:72]
wire [1:0] _sourceD_rreq_out_bankEn_T_6 = {2{_sourceD_rreq_out_bankEn_T_2}}; // @[BankedStore.scala:137:72]
wire [1:0] _sourceD_rreq_out_bankEn_T_7 = {2{_sourceD_rreq_out_bankEn_T_3}}; // @[BankedStore.scala:137:72]
wire [3:0] sourceD_rreq_out_bankEn_lo = {_sourceD_rreq_out_bankEn_T_5, _sourceD_rreq_out_bankEn_T_4}; // @[BankedStore.scala:137:72]
wire [3:0] sourceD_rreq_out_bankEn_hi = {_sourceD_rreq_out_bankEn_T_7, _sourceD_rreq_out_bankEn_T_6}; // @[BankedStore.scala:137:72]
wire [7:0] _sourceD_rreq_out_bankEn_T_8 = {sourceD_rreq_out_bankEn_hi, sourceD_rreq_out_bankEn_lo}; // @[BankedStore.scala:137:72]
wire [7:0] _sourceD_rreq_out_bankEn_T_9 = sourceD_rreq_bankSel & _sourceD_rreq_out_bankEn_T_8; // @[BankedStore.scala:128:19, :137:{55,72}]
assign _sourceD_rreq_out_bankEn_T_10 = _sourceD_rreq_out_bankEn_T_9; // @[BankedStore.scala:137:{24,55}]
assign sourceD_rreq_bankEn = _sourceD_rreq_out_bankEn_T_10; // @[BankedStore.scala:128:19, :137:24]
wire [63:0] sourceD_wreq_words_0 = io_sourceD_wdat_data_0[63:0]; // @[BankedStore.scala:59:7, :123:19]
wire [63:0] sourceD_wreq_data_0 = sourceD_wreq_words_0; // @[BankedStore.scala:123:19, :128:19]
wire [63:0] sourceD_wreq_data_2 = sourceD_wreq_words_0; // @[BankedStore.scala:123:19, :128:19]
wire [63:0] sourceD_wreq_data_4 = sourceD_wreq_words_0; // @[BankedStore.scala:123:19, :128:19]
wire [63:0] sourceD_wreq_data_6 = sourceD_wreq_words_0; // @[BankedStore.scala:123:19, :128:19]
wire [63:0] sourceD_wreq_words_1 = io_sourceD_wdat_data_0[127:64]; // @[BankedStore.scala:59:7, :123:19]
wire [63:0] sourceD_wreq_data_1 = sourceD_wreq_words_1; // @[BankedStore.scala:123:19, :128:19]
wire [63:0] sourceD_wreq_data_3 = sourceD_wreq_words_1; // @[BankedStore.scala:123:19, :128:19]
wire [63:0] sourceD_wreq_data_5 = sourceD_wreq_words_1; // @[BankedStore.scala:123:19, :128:19]
wire [63:0] sourceD_wreq_data_7 = sourceD_wreq_words_1; // @[BankedStore.scala:123:19, :128:19]
wire [14:0] sourceD_wreq_a_hi = {io_sourceD_wadr_bits_way_0, io_sourceD_wadr_bits_set_0}; // @[BankedStore.scala:59:7, :126:91]
wire [16:0] sourceD_wreq_a = {sourceD_wreq_a_hi, io_sourceD_wadr_bits_beat_0}; // @[BankedStore.scala:59:7, :126:91]
wire [14:0] _sourceD_wreq_out_index_T; // @[BankedStore.scala:135:23]
wire [7:0] _sourceD_wreq_out_bankSel_T_12; // @[BankedStore.scala:136:24]
wire [7:0] _sourceD_wreq_out_bankEn_T_10; // @[BankedStore.scala:137:24]
wire [14:0] sourceD_wreq_index; // @[BankedStore.scala:128:19]
wire [7:0] sourceD_wreq_bankSel; // @[BankedStore.scala:128:19]
wire [7:0] sourceD_wreq_bankSum; // @[BankedStore.scala:128:19]
wire [7:0] sourceD_wreq_bankEn; // @[BankedStore.scala:128:19]
wire [1:0] _sourceD_wreq_select_T = sourceD_wreq_a[1:0]; // @[BankedStore.scala:126:91, :130:28]
wire [1:0] _sourceD_wreq_io_sourceD_wadr_ready_T = sourceD_wreq_a[1:0]; // @[BankedStore.scala:126:91, :130:28, :132:23]
wire [1:0] sourceD_wreq_select_shiftAmount = _sourceD_wreq_select_T; // @[OneHot.scala:64:49]
wire [3:0] _sourceD_wreq_select_T_1 = 4'h1 << sourceD_wreq_select_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [3:0] sourceD_wreq_select = _sourceD_wreq_select_T_1; // @[OneHot.scala:65:{12,27}]
wire [1:0] _sourceD_wreq_ready_T = sourceD_wreq_bankSum[1:0]; // @[BankedStore.scala:128:19, :131:71]
wire [1:0] _sourceD_wreq_ready_T_1 = _sourceD_wreq_ready_T & io_sourceD_wadr_bits_mask_0; // @[BankedStore.scala:59:7, :131:{71,96}]
wire _sourceD_wreq_ready_T_2 = |_sourceD_wreq_ready_T_1; // @[BankedStore.scala:131:{96,101}]
wire _sourceD_wreq_ready_T_3 = ~_sourceD_wreq_ready_T_2; // @[BankedStore.scala:131:{58,101}]
wire [1:0] _sourceD_wreq_ready_T_4 = sourceD_wreq_bankSum[3:2]; // @[BankedStore.scala:128:19, :131:71]
wire [1:0] _sourceD_wreq_ready_T_5 = _sourceD_wreq_ready_T_4 & io_sourceD_wadr_bits_mask_0; // @[BankedStore.scala:59:7, :131:{71,96}]
wire _sourceD_wreq_ready_T_6 = |_sourceD_wreq_ready_T_5; // @[BankedStore.scala:131:{96,101}]
wire _sourceD_wreq_ready_T_7 = ~_sourceD_wreq_ready_T_6; // @[BankedStore.scala:131:{58,101}]
wire [1:0] _sourceD_wreq_ready_T_8 = sourceD_wreq_bankSum[5:4]; // @[BankedStore.scala:128:19, :131:71]
wire [1:0] _sourceD_wreq_ready_T_9 = _sourceD_wreq_ready_T_8 & io_sourceD_wadr_bits_mask_0; // @[BankedStore.scala:59:7, :131:{71,96}]
wire _sourceD_wreq_ready_T_10 = |_sourceD_wreq_ready_T_9; // @[BankedStore.scala:131:{96,101}]
wire _sourceD_wreq_ready_T_11 = ~_sourceD_wreq_ready_T_10; // @[BankedStore.scala:131:{58,101}]
wire [1:0] _sourceD_wreq_ready_T_12 = sourceD_wreq_bankSum[7:6]; // @[BankedStore.scala:128:19, :131:71]
wire [1:0] _sourceD_wreq_ready_T_13 = _sourceD_wreq_ready_T_12 & io_sourceD_wadr_bits_mask_0; // @[BankedStore.scala:59:7, :131:{71,96}]
wire _sourceD_wreq_ready_T_14 = |_sourceD_wreq_ready_T_13; // @[BankedStore.scala:131:{96,101}]
wire _sourceD_wreq_ready_T_15 = ~_sourceD_wreq_ready_T_14; // @[BankedStore.scala:131:{58,101}]
wire [1:0] sourceD_wreq_ready_lo = {_sourceD_wreq_ready_T_7, _sourceD_wreq_ready_T_3}; // @[BankedStore.scala:131:{21,58}]
wire [1:0] sourceD_wreq_ready_hi = {_sourceD_wreq_ready_T_15, _sourceD_wreq_ready_T_11}; // @[BankedStore.scala:131:{21,58}]
wire [3:0] sourceD_wreq_ready = {sourceD_wreq_ready_hi, sourceD_wreq_ready_lo}; // @[BankedStore.scala:131:21]
wire [3:0] _sourceD_wreq_io_sourceD_wadr_ready_T_1 = sourceD_wreq_ready >> _sourceD_wreq_io_sourceD_wadr_ready_T; // @[BankedStore.scala:131:21, :132:{21,23}]
assign _sourceD_wreq_io_sourceD_wadr_ready_T_2 = _sourceD_wreq_io_sourceD_wadr_ready_T_1[0]; // @[BankedStore.scala:132:21]
assign io_sourceD_wadr_ready_0 = _sourceD_wreq_io_sourceD_wadr_ready_T_2; // @[BankedStore.scala:59:7, :132:21]
assign _sourceD_wreq_out_index_T = sourceD_wreq_a[16:2]; // @[BankedStore.scala:126:91, :135:23]
assign sourceD_wreq_index = _sourceD_wreq_out_index_T; // @[BankedStore.scala:128:19, :135:23]
wire _sourceD_wreq_out_bankSel_T = sourceD_wreq_select[0]; // @[OneHot.scala:65:27]
wire _sourceD_wreq_out_bankSel_T_1 = sourceD_wreq_select[1]; // @[OneHot.scala:65:27]
wire _sourceD_wreq_out_bankSel_T_2 = sourceD_wreq_select[2]; // @[OneHot.scala:65:27]
wire _sourceD_wreq_out_bankSel_T_3 = sourceD_wreq_select[3]; // @[OneHot.scala:65:27]
wire [1:0] _sourceD_wreq_out_bankSel_T_4 = {2{_sourceD_wreq_out_bankSel_T}}; // @[BankedStore.scala:136:49]
wire [1:0] _sourceD_wreq_out_bankSel_T_5 = {2{_sourceD_wreq_out_bankSel_T_1}}; // @[BankedStore.scala:136:49]
wire [1:0] _sourceD_wreq_out_bankSel_T_6 = {2{_sourceD_wreq_out_bankSel_T_2}}; // @[BankedStore.scala:136:49]
wire [1:0] _sourceD_wreq_out_bankSel_T_7 = {2{_sourceD_wreq_out_bankSel_T_3}}; // @[BankedStore.scala:136:49]
wire [3:0] sourceD_wreq_out_bankSel_lo = {_sourceD_wreq_out_bankSel_T_5, _sourceD_wreq_out_bankSel_T_4}; // @[BankedStore.scala:136:49]
wire [3:0] sourceD_wreq_out_bankSel_hi = {_sourceD_wreq_out_bankSel_T_7, _sourceD_wreq_out_bankSel_T_6}; // @[BankedStore.scala:136:49]
wire [7:0] _sourceD_wreq_out_bankSel_T_8 = {sourceD_wreq_out_bankSel_hi, sourceD_wreq_out_bankSel_lo}; // @[BankedStore.scala:136:49]
wire [3:0] _sourceD_wreq_out_bankSel_T_9 = {2{io_sourceD_wadr_bits_mask_0}}; // @[BankedStore.scala:59:7, :136:71]
wire [7:0] _sourceD_wreq_out_bankSel_T_10 = {2{_sourceD_wreq_out_bankSel_T_9}}; // @[BankedStore.scala:136:71]
wire [7:0] _sourceD_wreq_out_bankSel_T_11 = _sourceD_wreq_out_bankSel_T_8 & _sourceD_wreq_out_bankSel_T_10; // @[BankedStore.scala:136:{49,65,71}]
assign _sourceD_wreq_out_bankSel_T_12 = io_sourceD_wadr_valid_0 ? _sourceD_wreq_out_bankSel_T_11 : 8'h0; // @[BankedStore.scala:59:7, :136:{24,65}]
assign sourceD_wreq_bankSel = _sourceD_wreq_out_bankSel_T_12; // @[BankedStore.scala:128:19, :136:24]
wire _sourceD_wreq_out_bankEn_T = sourceD_wreq_ready[0]; // @[BankedStore.scala:131:21, :137:72]
wire _sourceD_wreq_out_bankEn_T_1 = sourceD_wreq_ready[1]; // @[BankedStore.scala:131:21, :137:72]
wire _sourceD_wreq_out_bankEn_T_2 = sourceD_wreq_ready[2]; // @[BankedStore.scala:131:21, :137:72]
wire _sourceD_wreq_out_bankEn_T_3 = sourceD_wreq_ready[3]; // @[BankedStore.scala:131:21, :137:72]
wire [1:0] _sourceD_wreq_out_bankEn_T_4 = {2{_sourceD_wreq_out_bankEn_T}}; // @[BankedStore.scala:137:72]
wire [1:0] _sourceD_wreq_out_bankEn_T_5 = {2{_sourceD_wreq_out_bankEn_T_1}}; // @[BankedStore.scala:137:72]
wire [1:0] _sourceD_wreq_out_bankEn_T_6 = {2{_sourceD_wreq_out_bankEn_T_2}}; // @[BankedStore.scala:137:72]
wire [1:0] _sourceD_wreq_out_bankEn_T_7 = {2{_sourceD_wreq_out_bankEn_T_3}}; // @[BankedStore.scala:137:72]
wire [3:0] sourceD_wreq_out_bankEn_lo = {_sourceD_wreq_out_bankEn_T_5, _sourceD_wreq_out_bankEn_T_4}; // @[BankedStore.scala:137:72]
wire [3:0] sourceD_wreq_out_bankEn_hi = {_sourceD_wreq_out_bankEn_T_7, _sourceD_wreq_out_bankEn_T_6}; // @[BankedStore.scala:137:72]
wire [7:0] _sourceD_wreq_out_bankEn_T_8 = {sourceD_wreq_out_bankEn_hi, sourceD_wreq_out_bankEn_lo}; // @[BankedStore.scala:137:72]
wire [7:0] _sourceD_wreq_out_bankEn_T_9 = sourceD_wreq_bankSel & _sourceD_wreq_out_bankEn_T_8; // @[BankedStore.scala:128:19, :137:{55,72}]
assign _sourceD_wreq_out_bankEn_T_10 = _sourceD_wreq_out_bankEn_T_9; // @[BankedStore.scala:137:{24,55}]
assign sourceD_wreq_bankEn = _sourceD_wreq_out_bankEn_T_10; // @[BankedStore.scala:128:19, :137:24]
assign sinkD_req_bankSum = sourceC_req_bankSel | sinkC_req_bankSel; // @[BankedStore.scala:128:19, :161:17]
assign sourceD_wreq_bankSum = sinkD_req_bankSel | sinkD_req_bankSum; // @[BankedStore.scala:128:19, :161:17]
assign sourceD_rreq_bankSum = sourceD_wreq_bankSel | sourceD_wreq_bankSum; // @[BankedStore.scala:128:19, :161:17]
wire _regout_en_T = sinkC_req_bankEn[0]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_1 = sourceC_req_bankEn[0]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_2 = sinkD_req_bankEn[0]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_3 = sourceD_wreq_bankEn[0]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_4 = sourceD_rreq_bankEn[0]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_5 = _regout_en_T | _regout_en_T_1; // @[BankedStore.scala:165:{32,45}]
wire _regout_en_T_6 = _regout_en_T_5 | _regout_en_T_2; // @[BankedStore.scala:165:{32,45}]
wire _regout_en_T_7 = _regout_en_T_6 | _regout_en_T_3; // @[BankedStore.scala:165:{32,45}]
wire regout_en = _regout_en_T_7 | _regout_en_T_4; // @[BankedStore.scala:165:{32,45}]
wire regout_sel_0 = sinkC_req_bankSel[0]; // @[BankedStore.scala:128:19, :166:33]
wire regout_sel_1 = sourceC_req_bankSel[0]; // @[BankedStore.scala:128:19, :166:33]
wire regout_sel_2 = sinkD_req_bankSel[0]; // @[BankedStore.scala:128:19, :166:33]
wire regout_sel_3 = sourceD_wreq_bankSel[0]; // @[BankedStore.scala:128:19, :166:33]
wire _regout_wen_T = regout_sel_3; // @[Mux.scala:50:70]
wire regout_sel_4 = sourceD_rreq_bankSel[0]; // @[BankedStore.scala:128:19, :166:33]
wire _regout_wen_T_1 = regout_sel_2 | _regout_wen_T; // @[Mux.scala:50:70]
wire _regout_wen_T_2 = ~regout_sel_1 & _regout_wen_T_1; // @[Mux.scala:50:70]
wire regout_wen = regout_sel_0 | _regout_wen_T_2; // @[Mux.scala:50:70]
wire [14:0] _regout_idx_T = regout_sel_3 ? sourceD_wreq_index : sourceD_rreq_index; // @[Mux.scala:50:70]
wire [14:0] _regout_idx_T_1 = regout_sel_2 ? sinkD_req_index : _regout_idx_T; // @[Mux.scala:50:70]
wire [14:0] _regout_idx_T_2 = regout_sel_1 ? sourceC_req_index : _regout_idx_T_1; // @[Mux.scala:50:70]
assign regout_idx = regout_sel_0 ? sinkC_req_index : _regout_idx_T_2; // @[Mux.scala:50:70]
assign _regout_WIRE = regout_idx; // @[Mux.scala:50:70]
wire [63:0] _regout_data_T = regout_sel_3 ? sourceD_wreq_data_0 : 64'h0; // @[Mux.scala:50:70]
wire [63:0] _regout_data_T_1 = regout_sel_2 ? sinkD_req_data_0 : _regout_data_T; // @[Mux.scala:50:70]
wire [63:0] _regout_data_T_2 = regout_sel_1 ? 64'h0 : _regout_data_T_1; // @[Mux.scala:50:70]
wire [63:0] regout_data = regout_sel_0 ? sinkC_req_data_0 : _regout_data_T_2; // @[Mux.scala:50:70]
assign _regout_T = regout_wen & regout_en; // @[Mux.scala:50:70]
wire _regout_T_1 = ~regout_wen; // @[Mux.scala:50:70]
assign _regout_T_2 = _regout_T_1 & regout_en; // @[BankedStore.scala:165:45, :172:{27,32}]
wire _regout_T_3 = ~regout_wen; // @[Mux.scala:50:70]
wire _regout_T_4 = _regout_T_3 & regout_en; // @[BankedStore.scala:165:45, :172:{48,53}]
reg regout_REG; // @[BankedStore.scala:172:47]
reg [63:0] regout_r; // @[BankedStore.scala:172:14]
wire [63:0] regout_0 = regout_r; // @[BankedStore.scala:164:23, :172:14]
wire _regout_en_T_8 = sinkC_req_bankEn[1]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_9 = sourceC_req_bankEn[1]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_10 = sinkD_req_bankEn[1]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_11 = sourceD_wreq_bankEn[1]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_12 = sourceD_rreq_bankEn[1]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_13 = _regout_en_T_8 | _regout_en_T_9; // @[BankedStore.scala:165:{32,45}]
wire _regout_en_T_14 = _regout_en_T_13 | _regout_en_T_10; // @[BankedStore.scala:165:{32,45}]
wire _regout_en_T_15 = _regout_en_T_14 | _regout_en_T_11; // @[BankedStore.scala:165:{32,45}]
wire regout_en_1 = _regout_en_T_15 | _regout_en_T_12; // @[BankedStore.scala:165:{32,45}]
wire regout_sel_0_1 = sinkC_req_bankSel[1]; // @[BankedStore.scala:128:19, :166:33]
wire regout_sel_1_1 = sourceC_req_bankSel[1]; // @[BankedStore.scala:128:19, :166:33]
wire regout_sel_2_1 = sinkD_req_bankSel[1]; // @[BankedStore.scala:128:19, :166:33]
wire regout_sel_3_1 = sourceD_wreq_bankSel[1]; // @[BankedStore.scala:128:19, :166:33]
wire _regout_wen_T_3 = regout_sel_3_1; // @[Mux.scala:50:70]
wire regout_sel_4_1 = sourceD_rreq_bankSel[1]; // @[BankedStore.scala:128:19, :166:33]
wire _regout_wen_T_4 = regout_sel_2_1 | _regout_wen_T_3; // @[Mux.scala:50:70]
wire _regout_wen_T_5 = ~regout_sel_1_1 & _regout_wen_T_4; // @[Mux.scala:50:70]
wire regout_wen_1 = regout_sel_0_1 | _regout_wen_T_5; // @[Mux.scala:50:70]
wire [14:0] _regout_idx_T_3 = regout_sel_3_1 ? sourceD_wreq_index : sourceD_rreq_index; // @[Mux.scala:50:70]
wire [14:0] _regout_idx_T_4 = regout_sel_2_1 ? sinkD_req_index : _regout_idx_T_3; // @[Mux.scala:50:70]
wire [14:0] _regout_idx_T_5 = regout_sel_1_1 ? sourceC_req_index : _regout_idx_T_4; // @[Mux.scala:50:70]
assign regout_idx_1 = regout_sel_0_1 ? sinkC_req_index : _regout_idx_T_5; // @[Mux.scala:50:70]
assign _regout_WIRE_1 = regout_idx_1; // @[Mux.scala:50:70]
wire [63:0] _regout_data_T_3 = regout_sel_3_1 ? sourceD_wreq_data_1 : 64'h0; // @[Mux.scala:50:70]
wire [63:0] _regout_data_T_4 = regout_sel_2_1 ? sinkD_req_data_1 : _regout_data_T_3; // @[Mux.scala:50:70]
wire [63:0] _regout_data_T_5 = regout_sel_1_1 ? 64'h0 : _regout_data_T_4; // @[Mux.scala:50:70]
wire [63:0] regout_data_1 = regout_sel_0_1 ? sinkC_req_data_1 : _regout_data_T_5; // @[Mux.scala:50:70]
assign _regout_T_5 = regout_wen_1 & regout_en_1; // @[Mux.scala:50:70]
wire _regout_T_6 = ~regout_wen_1; // @[Mux.scala:50:70]
assign _regout_T_7 = _regout_T_6 & regout_en_1; // @[BankedStore.scala:165:45, :172:{27,32}]
wire _regout_T_8 = ~regout_wen_1; // @[Mux.scala:50:70]
wire _regout_T_9 = _regout_T_8 & regout_en_1; // @[BankedStore.scala:165:45, :172:{48,53}]
reg regout_REG_1; // @[BankedStore.scala:172:47]
reg [63:0] regout_r_1; // @[BankedStore.scala:172:14]
wire [63:0] regout_1 = regout_r_1; // @[BankedStore.scala:164:23, :172:14]
wire _regout_en_T_16 = sinkC_req_bankEn[2]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_17 = sourceC_req_bankEn[2]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_18 = sinkD_req_bankEn[2]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_19 = sourceD_wreq_bankEn[2]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_20 = sourceD_rreq_bankEn[2]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_21 = _regout_en_T_16 | _regout_en_T_17; // @[BankedStore.scala:165:{32,45}]
wire _regout_en_T_22 = _regout_en_T_21 | _regout_en_T_18; // @[BankedStore.scala:165:{32,45}]
wire _regout_en_T_23 = _regout_en_T_22 | _regout_en_T_19; // @[BankedStore.scala:165:{32,45}]
wire regout_en_2 = _regout_en_T_23 | _regout_en_T_20; // @[BankedStore.scala:165:{32,45}]
wire regout_sel_0_2 = sinkC_req_bankSel[2]; // @[BankedStore.scala:128:19, :166:33]
wire regout_sel_1_2 = sourceC_req_bankSel[2]; // @[BankedStore.scala:128:19, :166:33]
wire regout_sel_2_2 = sinkD_req_bankSel[2]; // @[BankedStore.scala:128:19, :166:33]
wire regout_sel_3_2 = sourceD_wreq_bankSel[2]; // @[BankedStore.scala:128:19, :166:33]
wire _regout_wen_T_6 = regout_sel_3_2; // @[Mux.scala:50:70]
wire regout_sel_4_2 = sourceD_rreq_bankSel[2]; // @[BankedStore.scala:128:19, :166:33]
wire _regout_wen_T_7 = regout_sel_2_2 | _regout_wen_T_6; // @[Mux.scala:50:70]
wire _regout_wen_T_8 = ~regout_sel_1_2 & _regout_wen_T_7; // @[Mux.scala:50:70]
wire regout_wen_2 = regout_sel_0_2 | _regout_wen_T_8; // @[Mux.scala:50:70]
wire [14:0] _regout_idx_T_6 = regout_sel_3_2 ? sourceD_wreq_index : sourceD_rreq_index; // @[Mux.scala:50:70]
wire [14:0] _regout_idx_T_7 = regout_sel_2_2 ? sinkD_req_index : _regout_idx_T_6; // @[Mux.scala:50:70]
wire [14:0] _regout_idx_T_8 = regout_sel_1_2 ? sourceC_req_index : _regout_idx_T_7; // @[Mux.scala:50:70]
assign regout_idx_2 = regout_sel_0_2 ? sinkC_req_index : _regout_idx_T_8; // @[Mux.scala:50:70]
assign _regout_WIRE_2 = regout_idx_2; // @[Mux.scala:50:70]
wire [63:0] _regout_data_T_6 = regout_sel_3_2 ? sourceD_wreq_data_2 : 64'h0; // @[Mux.scala:50:70]
wire [63:0] _regout_data_T_7 = regout_sel_2_2 ? sinkD_req_data_2 : _regout_data_T_6; // @[Mux.scala:50:70]
wire [63:0] _regout_data_T_8 = regout_sel_1_2 ? 64'h0 : _regout_data_T_7; // @[Mux.scala:50:70]
wire [63:0] regout_data_2 = regout_sel_0_2 ? sinkC_req_data_2 : _regout_data_T_8; // @[Mux.scala:50:70]
assign _regout_T_10 = regout_wen_2 & regout_en_2; // @[Mux.scala:50:70]
wire _regout_T_11 = ~regout_wen_2; // @[Mux.scala:50:70]
assign _regout_T_12 = _regout_T_11 & regout_en_2; // @[BankedStore.scala:165:45, :172:{27,32}]
wire _regout_T_13 = ~regout_wen_2; // @[Mux.scala:50:70]
wire _regout_T_14 = _regout_T_13 & regout_en_2; // @[BankedStore.scala:165:45, :172:{48,53}]
reg regout_REG_2; // @[BankedStore.scala:172:47]
reg [63:0] regout_r_2; // @[BankedStore.scala:172:14]
wire [63:0] regout_2 = regout_r_2; // @[BankedStore.scala:164:23, :172:14]
wire _regout_en_T_24 = sinkC_req_bankEn[3]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_25 = sourceC_req_bankEn[3]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_26 = sinkD_req_bankEn[3]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_27 = sourceD_wreq_bankEn[3]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_28 = sourceD_rreq_bankEn[3]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_29 = _regout_en_T_24 | _regout_en_T_25; // @[BankedStore.scala:165:{32,45}]
wire _regout_en_T_30 = _regout_en_T_29 | _regout_en_T_26; // @[BankedStore.scala:165:{32,45}]
wire _regout_en_T_31 = _regout_en_T_30 | _regout_en_T_27; // @[BankedStore.scala:165:{32,45}]
wire regout_en_3 = _regout_en_T_31 | _regout_en_T_28; // @[BankedStore.scala:165:{32,45}]
wire regout_sel_0_3 = sinkC_req_bankSel[3]; // @[BankedStore.scala:128:19, :166:33]
wire regout_sel_1_3 = sourceC_req_bankSel[3]; // @[BankedStore.scala:128:19, :166:33]
wire regout_sel_2_3 = sinkD_req_bankSel[3]; // @[BankedStore.scala:128:19, :166:33]
wire regout_sel_3_3 = sourceD_wreq_bankSel[3]; // @[BankedStore.scala:128:19, :166:33]
wire _regout_wen_T_9 = regout_sel_3_3; // @[Mux.scala:50:70]
wire regout_sel_4_3 = sourceD_rreq_bankSel[3]; // @[BankedStore.scala:128:19, :166:33]
wire _regout_wen_T_10 = regout_sel_2_3 | _regout_wen_T_9; // @[Mux.scala:50:70]
wire _regout_wen_T_11 = ~regout_sel_1_3 & _regout_wen_T_10; // @[Mux.scala:50:70]
wire regout_wen_3 = regout_sel_0_3 | _regout_wen_T_11; // @[Mux.scala:50:70]
wire [14:0] _regout_idx_T_9 = regout_sel_3_3 ? sourceD_wreq_index : sourceD_rreq_index; // @[Mux.scala:50:70]
wire [14:0] _regout_idx_T_10 = regout_sel_2_3 ? sinkD_req_index : _regout_idx_T_9; // @[Mux.scala:50:70]
wire [14:0] _regout_idx_T_11 = regout_sel_1_3 ? sourceC_req_index : _regout_idx_T_10; // @[Mux.scala:50:70]
assign regout_idx_3 = regout_sel_0_3 ? sinkC_req_index : _regout_idx_T_11; // @[Mux.scala:50:70]
assign _regout_WIRE_3 = regout_idx_3; // @[Mux.scala:50:70]
wire [63:0] _regout_data_T_9 = regout_sel_3_3 ? sourceD_wreq_data_3 : 64'h0; // @[Mux.scala:50:70]
wire [63:0] _regout_data_T_10 = regout_sel_2_3 ? sinkD_req_data_3 : _regout_data_T_9; // @[Mux.scala:50:70]
wire [63:0] _regout_data_T_11 = regout_sel_1_3 ? 64'h0 : _regout_data_T_10; // @[Mux.scala:50:70]
wire [63:0] regout_data_3 = regout_sel_0_3 ? sinkC_req_data_3 : _regout_data_T_11; // @[Mux.scala:50:70]
assign _regout_T_15 = regout_wen_3 & regout_en_3; // @[Mux.scala:50:70]
wire _regout_T_16 = ~regout_wen_3; // @[Mux.scala:50:70]
assign _regout_T_17 = _regout_T_16 & regout_en_3; // @[BankedStore.scala:165:45, :172:{27,32}]
wire _regout_T_18 = ~regout_wen_3; // @[Mux.scala:50:70]
wire _regout_T_19 = _regout_T_18 & regout_en_3; // @[BankedStore.scala:165:45, :172:{48,53}]
reg regout_REG_3; // @[BankedStore.scala:172:47]
reg [63:0] regout_r_3; // @[BankedStore.scala:172:14]
wire [63:0] regout_3 = regout_r_3; // @[BankedStore.scala:164:23, :172:14]
wire _regout_en_T_32 = sinkC_req_bankEn[4]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_33 = sourceC_req_bankEn[4]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_34 = sinkD_req_bankEn[4]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_35 = sourceD_wreq_bankEn[4]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_36 = sourceD_rreq_bankEn[4]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_37 = _regout_en_T_32 | _regout_en_T_33; // @[BankedStore.scala:165:{32,45}]
wire _regout_en_T_38 = _regout_en_T_37 | _regout_en_T_34; // @[BankedStore.scala:165:{32,45}]
wire _regout_en_T_39 = _regout_en_T_38 | _regout_en_T_35; // @[BankedStore.scala:165:{32,45}]
wire regout_en_4 = _regout_en_T_39 | _regout_en_T_36; // @[BankedStore.scala:165:{32,45}]
wire regout_sel_0_4 = sinkC_req_bankSel[4]; // @[BankedStore.scala:128:19, :166:33]
wire regout_sel_1_4 = sourceC_req_bankSel[4]; // @[BankedStore.scala:128:19, :166:33]
wire regout_sel_2_4 = sinkD_req_bankSel[4]; // @[BankedStore.scala:128:19, :166:33]
wire regout_sel_3_4 = sourceD_wreq_bankSel[4]; // @[BankedStore.scala:128:19, :166:33]
wire _regout_wen_T_12 = regout_sel_3_4; // @[Mux.scala:50:70]
wire regout_sel_4_4 = sourceD_rreq_bankSel[4]; // @[BankedStore.scala:128:19, :166:33]
wire _regout_wen_T_13 = regout_sel_2_4 | _regout_wen_T_12; // @[Mux.scala:50:70]
wire _regout_wen_T_14 = ~regout_sel_1_4 & _regout_wen_T_13; // @[Mux.scala:50:70]
wire regout_wen_4 = regout_sel_0_4 | _regout_wen_T_14; // @[Mux.scala:50:70]
wire [14:0] _regout_idx_T_12 = regout_sel_3_4 ? sourceD_wreq_index : sourceD_rreq_index; // @[Mux.scala:50:70]
wire [14:0] _regout_idx_T_13 = regout_sel_2_4 ? sinkD_req_index : _regout_idx_T_12; // @[Mux.scala:50:70]
wire [14:0] _regout_idx_T_14 = regout_sel_1_4 ? sourceC_req_index : _regout_idx_T_13; // @[Mux.scala:50:70]
assign regout_idx_4 = regout_sel_0_4 ? sinkC_req_index : _regout_idx_T_14; // @[Mux.scala:50:70]
assign _regout_WIRE_4 = regout_idx_4; // @[Mux.scala:50:70]
wire [63:0] _regout_data_T_12 = regout_sel_3_4 ? sourceD_wreq_data_4 : 64'h0; // @[Mux.scala:50:70]
wire [63:0] _regout_data_T_13 = regout_sel_2_4 ? sinkD_req_data_4 : _regout_data_T_12; // @[Mux.scala:50:70]
wire [63:0] _regout_data_T_14 = regout_sel_1_4 ? 64'h0 : _regout_data_T_13; // @[Mux.scala:50:70]
wire [63:0] regout_data_4 = regout_sel_0_4 ? sinkC_req_data_4 : _regout_data_T_14; // @[Mux.scala:50:70]
assign _regout_T_20 = regout_wen_4 & regout_en_4; // @[Mux.scala:50:70]
wire _regout_T_21 = ~regout_wen_4; // @[Mux.scala:50:70]
assign _regout_T_22 = _regout_T_21 & regout_en_4; // @[BankedStore.scala:165:45, :172:{27,32}]
wire _regout_T_23 = ~regout_wen_4; // @[Mux.scala:50:70]
wire _regout_T_24 = _regout_T_23 & regout_en_4; // @[BankedStore.scala:165:45, :172:{48,53}]
reg regout_REG_4; // @[BankedStore.scala:172:47]
reg [63:0] regout_r_4; // @[BankedStore.scala:172:14]
wire [63:0] regout_4 = regout_r_4; // @[BankedStore.scala:164:23, :172:14]
wire _regout_en_T_40 = sinkC_req_bankEn[5]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_41 = sourceC_req_bankEn[5]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_42 = sinkD_req_bankEn[5]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_43 = sourceD_wreq_bankEn[5]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_44 = sourceD_rreq_bankEn[5]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_45 = _regout_en_T_40 | _regout_en_T_41; // @[BankedStore.scala:165:{32,45}]
wire _regout_en_T_46 = _regout_en_T_45 | _regout_en_T_42; // @[BankedStore.scala:165:{32,45}]
wire _regout_en_T_47 = _regout_en_T_46 | _regout_en_T_43; // @[BankedStore.scala:165:{32,45}]
wire regout_en_5 = _regout_en_T_47 | _regout_en_T_44; // @[BankedStore.scala:165:{32,45}]
wire regout_sel_0_5 = sinkC_req_bankSel[5]; // @[BankedStore.scala:128:19, :166:33]
wire regout_sel_1_5 = sourceC_req_bankSel[5]; // @[BankedStore.scala:128:19, :166:33]
wire regout_sel_2_5 = sinkD_req_bankSel[5]; // @[BankedStore.scala:128:19, :166:33]
wire regout_sel_3_5 = sourceD_wreq_bankSel[5]; // @[BankedStore.scala:128:19, :166:33]
wire _regout_wen_T_15 = regout_sel_3_5; // @[Mux.scala:50:70]
wire regout_sel_4_5 = sourceD_rreq_bankSel[5]; // @[BankedStore.scala:128:19, :166:33]
wire _regout_wen_T_16 = regout_sel_2_5 | _regout_wen_T_15; // @[Mux.scala:50:70]
wire _regout_wen_T_17 = ~regout_sel_1_5 & _regout_wen_T_16; // @[Mux.scala:50:70]
wire regout_wen_5 = regout_sel_0_5 | _regout_wen_T_17; // @[Mux.scala:50:70]
wire [14:0] _regout_idx_T_15 = regout_sel_3_5 ? sourceD_wreq_index : sourceD_rreq_index; // @[Mux.scala:50:70]
wire [14:0] _regout_idx_T_16 = regout_sel_2_5 ? sinkD_req_index : _regout_idx_T_15; // @[Mux.scala:50:70]
wire [14:0] _regout_idx_T_17 = regout_sel_1_5 ? sourceC_req_index : _regout_idx_T_16; // @[Mux.scala:50:70]
assign regout_idx_5 = regout_sel_0_5 ? sinkC_req_index : _regout_idx_T_17; // @[Mux.scala:50:70]
assign _regout_WIRE_5 = regout_idx_5; // @[Mux.scala:50:70]
wire [63:0] _regout_data_T_15 = regout_sel_3_5 ? sourceD_wreq_data_5 : 64'h0; // @[Mux.scala:50:70]
wire [63:0] _regout_data_T_16 = regout_sel_2_5 ? sinkD_req_data_5 : _regout_data_T_15; // @[Mux.scala:50:70]
wire [63:0] _regout_data_T_17 = regout_sel_1_5 ? 64'h0 : _regout_data_T_16; // @[Mux.scala:50:70]
wire [63:0] regout_data_5 = regout_sel_0_5 ? sinkC_req_data_5 : _regout_data_T_17; // @[Mux.scala:50:70]
assign _regout_T_25 = regout_wen_5 & regout_en_5; // @[Mux.scala:50:70]
wire _regout_T_26 = ~regout_wen_5; // @[Mux.scala:50:70]
assign _regout_T_27 = _regout_T_26 & regout_en_5; // @[BankedStore.scala:165:45, :172:{27,32}]
wire _regout_T_28 = ~regout_wen_5; // @[Mux.scala:50:70]
wire _regout_T_29 = _regout_T_28 & regout_en_5; // @[BankedStore.scala:165:45, :172:{48,53}]
reg regout_REG_5; // @[BankedStore.scala:172:47]
reg [63:0] regout_r_5; // @[BankedStore.scala:172:14]
wire [63:0] regout_5 = regout_r_5; // @[BankedStore.scala:164:23, :172:14]
wire _regout_en_T_48 = sinkC_req_bankEn[6]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_49 = sourceC_req_bankEn[6]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_50 = sinkD_req_bankEn[6]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_51 = sourceD_wreq_bankEn[6]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_52 = sourceD_rreq_bankEn[6]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_53 = _regout_en_T_48 | _regout_en_T_49; // @[BankedStore.scala:165:{32,45}]
wire _regout_en_T_54 = _regout_en_T_53 | _regout_en_T_50; // @[BankedStore.scala:165:{32,45}]
wire _regout_en_T_55 = _regout_en_T_54 | _regout_en_T_51; // @[BankedStore.scala:165:{32,45}]
wire regout_en_6 = _regout_en_T_55 | _regout_en_T_52; // @[BankedStore.scala:165:{32,45}]
wire regout_sel_0_6 = sinkC_req_bankSel[6]; // @[BankedStore.scala:128:19, :166:33]
wire regout_sel_1_6 = sourceC_req_bankSel[6]; // @[BankedStore.scala:128:19, :166:33]
wire regout_sel_2_6 = sinkD_req_bankSel[6]; // @[BankedStore.scala:128:19, :166:33]
wire regout_sel_3_6 = sourceD_wreq_bankSel[6]; // @[BankedStore.scala:128:19, :166:33]
wire _regout_wen_T_18 = regout_sel_3_6; // @[Mux.scala:50:70]
wire regout_sel_4_6 = sourceD_rreq_bankSel[6]; // @[BankedStore.scala:128:19, :166:33]
wire _regout_wen_T_19 = regout_sel_2_6 | _regout_wen_T_18; // @[Mux.scala:50:70]
wire _regout_wen_T_20 = ~regout_sel_1_6 & _regout_wen_T_19; // @[Mux.scala:50:70]
wire regout_wen_6 = regout_sel_0_6 | _regout_wen_T_20; // @[Mux.scala:50:70]
wire [14:0] _regout_idx_T_18 = regout_sel_3_6 ? sourceD_wreq_index : sourceD_rreq_index; // @[Mux.scala:50:70]
wire [14:0] _regout_idx_T_19 = regout_sel_2_6 ? sinkD_req_index : _regout_idx_T_18; // @[Mux.scala:50:70]
wire [14:0] _regout_idx_T_20 = regout_sel_1_6 ? sourceC_req_index : _regout_idx_T_19; // @[Mux.scala:50:70]
assign regout_idx_6 = regout_sel_0_6 ? sinkC_req_index : _regout_idx_T_20; // @[Mux.scala:50:70]
assign _regout_WIRE_6 = regout_idx_6; // @[Mux.scala:50:70]
wire [63:0] _regout_data_T_18 = regout_sel_3_6 ? sourceD_wreq_data_6 : 64'h0; // @[Mux.scala:50:70]
wire [63:0] _regout_data_T_19 = regout_sel_2_6 ? sinkD_req_data_6 : _regout_data_T_18; // @[Mux.scala:50:70]
wire [63:0] _regout_data_T_20 = regout_sel_1_6 ? 64'h0 : _regout_data_T_19; // @[Mux.scala:50:70]
wire [63:0] regout_data_6 = regout_sel_0_6 ? sinkC_req_data_6 : _regout_data_T_20; // @[Mux.scala:50:70]
assign _regout_T_30 = regout_wen_6 & regout_en_6; // @[Mux.scala:50:70]
wire _regout_T_31 = ~regout_wen_6; // @[Mux.scala:50:70]
assign _regout_T_32 = _regout_T_31 & regout_en_6; // @[BankedStore.scala:165:45, :172:{27,32}]
wire _regout_T_33 = ~regout_wen_6; // @[Mux.scala:50:70]
wire _regout_T_34 = _regout_T_33 & regout_en_6; // @[BankedStore.scala:165:45, :172:{48,53}]
reg regout_REG_6; // @[BankedStore.scala:172:47]
reg [63:0] regout_r_6; // @[BankedStore.scala:172:14]
wire [63:0] regout_6 = regout_r_6; // @[BankedStore.scala:164:23, :172:14]
wire _regout_en_T_56 = sinkC_req_bankEn[7]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_57 = sourceC_req_bankEn[7]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_58 = sinkD_req_bankEn[7]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_59 = sourceD_wreq_bankEn[7]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_60 = sourceD_rreq_bankEn[7]; // @[BankedStore.scala:128:19, :165:32]
wire _regout_en_T_61 = _regout_en_T_56 | _regout_en_T_57; // @[BankedStore.scala:165:{32,45}]
wire _regout_en_T_62 = _regout_en_T_61 | _regout_en_T_58; // @[BankedStore.scala:165:{32,45}]
wire _regout_en_T_63 = _regout_en_T_62 | _regout_en_T_59; // @[BankedStore.scala:165:{32,45}]
wire regout_en_7 = _regout_en_T_63 | _regout_en_T_60; // @[BankedStore.scala:165:{32,45}]
wire regout_sel_0_7 = sinkC_req_bankSel[7]; // @[BankedStore.scala:128:19, :166:33]
wire regout_sel_1_7 = sourceC_req_bankSel[7]; // @[BankedStore.scala:128:19, :166:33]
wire regout_sel_2_7 = sinkD_req_bankSel[7]; // @[BankedStore.scala:128:19, :166:33]
wire regout_sel_3_7 = sourceD_wreq_bankSel[7]; // @[BankedStore.scala:128:19, :166:33]
wire _regout_wen_T_21 = regout_sel_3_7; // @[Mux.scala:50:70]
wire regout_sel_4_7 = sourceD_rreq_bankSel[7]; // @[BankedStore.scala:128:19, :166:33]
wire _regout_wen_T_22 = regout_sel_2_7 | _regout_wen_T_21; // @[Mux.scala:50:70]
wire _regout_wen_T_23 = ~regout_sel_1_7 & _regout_wen_T_22; // @[Mux.scala:50:70]
wire regout_wen_7 = regout_sel_0_7 | _regout_wen_T_23; // @[Mux.scala:50:70]
wire [14:0] _regout_idx_T_21 = regout_sel_3_7 ? sourceD_wreq_index : sourceD_rreq_index; // @[Mux.scala:50:70]
wire [14:0] _regout_idx_T_22 = regout_sel_2_7 ? sinkD_req_index : _regout_idx_T_21; // @[Mux.scala:50:70]
wire [14:0] _regout_idx_T_23 = regout_sel_1_7 ? sourceC_req_index : _regout_idx_T_22; // @[Mux.scala:50:70]
assign regout_idx_7 = regout_sel_0_7 ? sinkC_req_index : _regout_idx_T_23; // @[Mux.scala:50:70]
assign _regout_WIRE_7 = regout_idx_7; // @[Mux.scala:50:70]
wire [63:0] _regout_data_T_21 = regout_sel_3_7 ? sourceD_wreq_data_7 : 64'h0; // @[Mux.scala:50:70]
wire [63:0] _regout_data_T_22 = regout_sel_2_7 ? sinkD_req_data_7 : _regout_data_T_21; // @[Mux.scala:50:70]
wire [63:0] _regout_data_T_23 = regout_sel_1_7 ? 64'h0 : _regout_data_T_22; // @[Mux.scala:50:70]
wire [63:0] regout_data_7 = regout_sel_0_7 ? sinkC_req_data_7 : _regout_data_T_23; // @[Mux.scala:50:70]
assign _regout_T_35 = regout_wen_7 & regout_en_7; // @[Mux.scala:50:70]
wire _regout_T_36 = ~regout_wen_7; // @[Mux.scala:50:70]
assign _regout_T_37 = _regout_T_36 & regout_en_7; // @[BankedStore.scala:165:45, :172:{27,32}]
wire _regout_T_38 = ~regout_wen_7; // @[Mux.scala:50:70]
wire _regout_T_39 = _regout_T_38 & regout_en_7; // @[BankedStore.scala:165:45, :172:{48,53}]
reg regout_REG_7; // @[BankedStore.scala:172:47]
reg [63:0] regout_r_7; // @[BankedStore.scala:172:14]
wire [63:0] regout_7 = regout_r_7; // @[BankedStore.scala:164:23, :172:14]
reg [7:0] regsel_sourceC_REG; // @[BankedStore.scala:175:39]
reg [7:0] regsel_sourceC; // @[BankedStore.scala:175:31]
reg [7:0] regsel_sourceD_REG; // @[BankedStore.scala:176:39]
reg [7:0] regsel_sourceD; // @[BankedStore.scala:176:31]
wire _decodeC_T = regsel_sourceC[0]; // @[BankedStore.scala:175:31, :179:38]
wire [63:0] _decodeC_T_1 = _decodeC_T ? regout_0 : 64'h0; // @[BankedStore.scala:164:23, :179:{23,38}]
wire _decodeC_T_2 = regsel_sourceC[1]; // @[BankedStore.scala:175:31, :179:38]
wire [63:0] _decodeC_T_3 = _decodeC_T_2 ? regout_1 : 64'h0; // @[BankedStore.scala:164:23, :179:{23,38}]
wire _decodeC_T_4 = regsel_sourceC[2]; // @[BankedStore.scala:175:31, :179:38]
wire [63:0] _decodeC_T_5 = _decodeC_T_4 ? regout_2 : 64'h0; // @[BankedStore.scala:164:23, :179:{23,38}]
wire _decodeC_T_6 = regsel_sourceC[3]; // @[BankedStore.scala:175:31, :179:38]
wire [63:0] _decodeC_T_7 = _decodeC_T_6 ? regout_3 : 64'h0; // @[BankedStore.scala:164:23, :179:{23,38}]
wire _decodeC_T_8 = regsel_sourceC[4]; // @[BankedStore.scala:175:31, :179:38]
wire [63:0] _decodeC_T_9 = _decodeC_T_8 ? regout_4 : 64'h0; // @[BankedStore.scala:164:23, :179:{23,38}]
wire _decodeC_T_10 = regsel_sourceC[5]; // @[BankedStore.scala:175:31, :179:38]
wire [63:0] _decodeC_T_11 = _decodeC_T_10 ? regout_5 : 64'h0; // @[BankedStore.scala:164:23, :179:{23,38}]
wire _decodeC_T_12 = regsel_sourceC[6]; // @[BankedStore.scala:175:31, :179:38]
wire [63:0] _decodeC_T_13 = _decodeC_T_12 ? regout_6 : 64'h0; // @[BankedStore.scala:164:23, :179:{23,38}]
wire _decodeC_T_14 = regsel_sourceC[7]; // @[BankedStore.scala:175:31, :179:38]
wire [63:0] _decodeC_T_15 = _decodeC_T_14 ? regout_7 : 64'h0; // @[BankedStore.scala:164:23, :179:{23,38}]
wire [63:0] _decodeC_T_16 = _decodeC_T_1 | _decodeC_T_3; // @[BankedStore.scala:179:23, :180:85]
wire [63:0] _decodeC_T_17 = _decodeC_T_16 | _decodeC_T_5; // @[BankedStore.scala:179:23, :180:85]
wire [63:0] _decodeC_T_18 = _decodeC_T_17 | _decodeC_T_7; // @[BankedStore.scala:179:23, :180:85]
wire [63:0] _decodeC_T_19 = _decodeC_T_18 | _decodeC_T_9; // @[BankedStore.scala:179:23, :180:85]
wire [63:0] _decodeC_T_20 = _decodeC_T_19 | _decodeC_T_11; // @[BankedStore.scala:179:23, :180:85]
wire [63:0] _decodeC_T_21 = _decodeC_T_20 | _decodeC_T_13; // @[BankedStore.scala:179:23, :180:85]
assign decodeC_0 = _decodeC_T_21 | _decodeC_T_15; // @[BankedStore.scala:179:23, :180:85]
assign io_sourceC_dat_data_0 = decodeC_0; // @[BankedStore.scala:59:7, :180:85]
wire _decodeD_T = regsel_sourceD[0]; // @[BankedStore.scala:176:31, :186:38]
wire [63:0] _decodeD_T_1 = _decodeD_T ? regout_0 : 64'h0; // @[BankedStore.scala:164:23, :186:{23,38}]
wire _decodeD_T_2 = regsel_sourceD[1]; // @[BankedStore.scala:176:31, :186:38]
wire [63:0] _decodeD_T_3 = _decodeD_T_2 ? regout_1 : 64'h0; // @[BankedStore.scala:164:23, :186:{23,38}]
wire _decodeD_T_4 = regsel_sourceD[2]; // @[BankedStore.scala:176:31, :186:38]
wire [63:0] _decodeD_T_5 = _decodeD_T_4 ? regout_2 : 64'h0; // @[BankedStore.scala:164:23, :186:{23,38}]
wire _decodeD_T_6 = regsel_sourceD[3]; // @[BankedStore.scala:176:31, :186:38]
wire [63:0] _decodeD_T_7 = _decodeD_T_6 ? regout_3 : 64'h0; // @[BankedStore.scala:164:23, :186:{23,38}]
wire _decodeD_T_8 = regsel_sourceD[4]; // @[BankedStore.scala:176:31, :186:38]
wire [63:0] _decodeD_T_9 = _decodeD_T_8 ? regout_4 : 64'h0; // @[BankedStore.scala:164:23, :186:{23,38}]
wire _decodeD_T_10 = regsel_sourceD[5]; // @[BankedStore.scala:176:31, :186:38]
wire [63:0] _decodeD_T_11 = _decodeD_T_10 ? regout_5 : 64'h0; // @[BankedStore.scala:164:23, :186:{23,38}]
wire _decodeD_T_12 = regsel_sourceD[6]; // @[BankedStore.scala:176:31, :186:38]
wire [63:0] _decodeD_T_13 = _decodeD_T_12 ? regout_6 : 64'h0; // @[BankedStore.scala:164:23, :186:{23,38}]
wire _decodeD_T_14 = regsel_sourceD[7]; // @[BankedStore.scala:176:31, :186:38]
wire [63:0] _decodeD_T_15 = _decodeD_T_14 ? regout_7 : 64'h0; // @[BankedStore.scala:164:23, :186:{23,38}]
wire [63:0] _decodeD_T_16 = _decodeD_T_1 | _decodeD_T_5; // @[BankedStore.scala:186:23, :187:85]
wire [63:0] _decodeD_T_17 = _decodeD_T_16 | _decodeD_T_9; // @[BankedStore.scala:186:23, :187:85]
wire [63:0] decodeD_0 = _decodeD_T_17 | _decodeD_T_13; // @[BankedStore.scala:186:23, :187:85]
wire [63:0] _decodeD_T_18 = _decodeD_T_3 | _decodeD_T_7; // @[BankedStore.scala:186:23, :187:85]
wire [63:0] _decodeD_T_19 = _decodeD_T_18 | _decodeD_T_11; // @[BankedStore.scala:186:23, :187:85]
wire [63:0] decodeD_1 = _decodeD_T_19 | _decodeD_T_15; // @[BankedStore.scala:186:23, :187:85]
assign _io_sourceD_rdat_data_T = {decodeD_1, decodeD_0}; // @[BankedStore.scala:187:85, :189:30]
assign io_sourceD_rdat_data_0 = _io_sourceD_rdat_data_T; // @[BankedStore.scala:59:7, :189:30]
always @(posedge clock) begin // @[BankedStore.scala:59:7]
regout_REG <= _regout_T_4; // @[BankedStore.scala:172:{47,53}]
if (regout_REG) // @[BankedStore.scala:172:47]
regout_r <= _cc_banks_0_RW0_rdata; // @[DescribedSRAM.scala:17:26]
regout_REG_1 <= _regout_T_9; // @[BankedStore.scala:172:{47,53}]
if (regout_REG_1) // @[BankedStore.scala:172:47]
regout_r_1 <= _cc_banks_1_RW0_rdata; // @[DescribedSRAM.scala:17:26]
regout_REG_2 <= _regout_T_14; // @[BankedStore.scala:172:{47,53}]
if (regout_REG_2) // @[BankedStore.scala:172:47]
regout_r_2 <= _cc_banks_2_RW0_rdata; // @[DescribedSRAM.scala:17:26]
regout_REG_3 <= _regout_T_19; // @[BankedStore.scala:172:{47,53}]
if (regout_REG_3) // @[BankedStore.scala:172:47]
regout_r_3 <= _cc_banks_3_RW0_rdata; // @[DescribedSRAM.scala:17:26]
regout_REG_4 <= _regout_T_24; // @[BankedStore.scala:172:{47,53}]
if (regout_REG_4) // @[BankedStore.scala:172:47]
regout_r_4 <= _cc_banks_4_RW0_rdata; // @[DescribedSRAM.scala:17:26]
regout_REG_5 <= _regout_T_29; // @[BankedStore.scala:172:{47,53}]
if (regout_REG_5) // @[BankedStore.scala:172:47]
regout_r_5 <= _cc_banks_5_RW0_rdata; // @[DescribedSRAM.scala:17:26]
regout_REG_6 <= _regout_T_34; // @[BankedStore.scala:172:{47,53}]
if (regout_REG_6) // @[BankedStore.scala:172:47]
regout_r_6 <= _cc_banks_6_RW0_rdata; // @[DescribedSRAM.scala:17:26]
regout_REG_7 <= _regout_T_39; // @[BankedStore.scala:172:{47,53}]
if (regout_REG_7) // @[BankedStore.scala:172:47]
regout_r_7 <= _cc_banks_7_RW0_rdata; // @[DescribedSRAM.scala:17:26]
regsel_sourceC_REG <= sourceC_req_bankEn; // @[BankedStore.scala:128:19, :175:39]
regsel_sourceC <= regsel_sourceC_REG; // @[BankedStore.scala:175:{31,39}]
regsel_sourceD_REG <= sourceD_rreq_bankEn; // @[BankedStore.scala:128:19, :176:39]
regsel_sourceD <= regsel_sourceD_REG; // @[BankedStore.scala:176:{31,39}]
  end // always @(posedge)
cc_banks_0_6 cc_banks_0 ( // @[DescribedSRAM.scala:17:26]
.RW0_addr (_regout_T ? regout_idx : _regout_WIRE), // @[Mux.scala:50:70]
.RW0_en (_regout_T_2 | _regout_T), // @[DescribedSRAM.scala:17:26]
.RW0_clk (clock),
.RW0_wmode (regout_wen), // @[Mux.scala:50:70]
.RW0_wdata (regout_data), // @[Mux.scala:50:70]
.RW0_rdata (_cc_banks_0_RW0_rdata)
); // @[DescribedSRAM.scala:17:26]
cc_banks_1_6 cc_banks_1 ( // @[DescribedSRAM.scala:17:26]
.RW0_addr (_regout_T_5 ? regout_idx_1 : _regout_WIRE_1), // @[Mux.scala:50:70]
.RW0_en (_regout_T_7 | _regout_T_5), // @[DescribedSRAM.scala:17:26]
.RW0_clk (clock),
.RW0_wmode (regout_wen_1), // @[Mux.scala:50:70]
.RW0_wdata (regout_data_1), // @[Mux.scala:50:70]
.RW0_rdata (_cc_banks_1_RW0_rdata)
); // @[DescribedSRAM.scala:17:26]
cc_banks_2_6 cc_banks_2 ( // @[DescribedSRAM.scala:17:26]
.RW0_addr (_regout_T_10 ? regout_idx_2 : _regout_WIRE_2), // @[Mux.scala:50:70]
.RW0_en (_regout_T_12 | _regout_T_10), // @[DescribedSRAM.scala:17:26]
.RW0_clk (clock),
.RW0_wmode (regout_wen_2), // @[Mux.scala:50:70]
.RW0_wdata (regout_data_2), // @[Mux.scala:50:70]
.RW0_rdata (_cc_banks_2_RW0_rdata)
); // @[DescribedSRAM.scala:17:26]
cc_banks_3_6 cc_banks_3 ( // @[DescribedSRAM.scala:17:26]
.RW0_addr (_regout_T_15 ? regout_idx_3 : _regout_WIRE_3), // @[Mux.scala:50:70]
.RW0_en (_regout_T_17 | _regout_T_15), // @[DescribedSRAM.scala:17:26]
.RW0_clk (clock),
.RW0_wmode (regout_wen_3), // @[Mux.scala:50:70]
.RW0_wdata (regout_data_3), // @[Mux.scala:50:70]
.RW0_rdata (_cc_banks_3_RW0_rdata)
); // @[DescribedSRAM.scala:17:26]
cc_banks_4_6 cc_banks_4 ( // @[DescribedSRAM.scala:17:26]
.RW0_addr (_regout_T_20 ? regout_idx_4 : _regout_WIRE_4), // @[Mux.scala:50:70]
.RW0_en (_regout_T_22 | _regout_T_20), // @[DescribedSRAM.scala:17:26]
.RW0_clk (clock),
.RW0_wmode (regout_wen_4), // @[Mux.scala:50:70]
.RW0_wdata (regout_data_4), // @[Mux.scala:50:70]
.RW0_rdata (_cc_banks_4_RW0_rdata)
); // @[DescribedSRAM.scala:17:26]
cc_banks_5_6 cc_banks_5 ( // @[DescribedSRAM.scala:17:26]
.RW0_addr (_regout_T_25 ? regout_idx_5 : _regout_WIRE_5), // @[Mux.scala:50:70]
.RW0_en (_regout_T_27 | _regout_T_25), // @[DescribedSRAM.scala:17:26]
.RW0_clk (clock),
.RW0_wmode (regout_wen_5), // @[Mux.scala:50:70]
.RW0_wdata (regout_data_5), // @[Mux.scala:50:70]
.RW0_rdata (_cc_banks_5_RW0_rdata)
); // @[DescribedSRAM.scala:17:26]
cc_banks_6_6 cc_banks_6 ( // @[DescribedSRAM.scala:17:26]
.RW0_addr (_regout_T_30 ? regout_idx_6 : _regout_WIRE_6), // @[Mux.scala:50:70]
.RW0_en (_regout_T_32 | _regout_T_30), // @[DescribedSRAM.scala:17:26]
.RW0_clk (clock),
.RW0_wmode (regout_wen_6), // @[Mux.scala:50:70]
.RW0_wdata (regout_data_6), // @[Mux.scala:50:70]
.RW0_rdata (_cc_banks_6_RW0_rdata)
); // @[DescribedSRAM.scala:17:26]
cc_banks_7_6 cc_banks_7 ( // @[DescribedSRAM.scala:17:26]
.RW0_addr (_regout_T_35 ? regout_idx_7 : _regout_WIRE_7), // @[Mux.scala:50:70]
.RW0_en (_regout_T_37 | _regout_T_35), // @[DescribedSRAM.scala:17:26]
.RW0_clk (clock),
.RW0_wmode (regout_wen_7), // @[Mux.scala:50:70]
.RW0_wdata (regout_data_7), // @[Mux.scala:50:70]
.RW0_rdata (_cc_banks_7_RW0_rdata)
); // @[DescribedSRAM.scala:17:26]
assign io_sinkC_adr_ready = io_sinkC_adr_ready_0; // @[BankedStore.scala:59:7]
assign io_sinkD_adr_ready = io_sinkD_adr_ready_0; // @[BankedStore.scala:59:7]
assign io_sourceC_adr_ready = io_sourceC_adr_ready_0; // @[BankedStore.scala:59:7]
assign io_sourceC_dat_data = io_sourceC_dat_data_0; // @[BankedStore.scala:59:7]
assign io_sourceD_radr_ready = io_sourceD_radr_ready_0; // @[BankedStore.scala:59:7]
assign io_sourceD_rdat_data = io_sourceD_rdat_data_0; // @[BankedStore.scala:59:7]
assign io_sourceD_wadr_ready = io_sourceD_wadr_ready_0; // @[BankedStore.scala:59:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File RecFNToIN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018 The Regents of
the University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util.log2Up
import scala.math._
import consts._
class RecFNToIN(expWidth: Int, sigWidth: Int, intWidth: Int) extends chisel3.Module
{
override def desiredName = s"RecFNToIN_e${expWidth}_s${sigWidth}_i${intWidth}"
val io = IO(new Bundle {
val in = Input(Bits((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val signedOut = Input(Bool())
val out = Output(Bits(intWidth.W))
val intExceptionFlags = Output(Bits(3.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val rawIn = rawFloatFromRecFN(expWidth, sigWidth, io.in)
val magGeOne = rawIn.sExp(expWidth)
val posExp = rawIn.sExp(expWidth - 1, 0)
val magJustBelowOne = !magGeOne && posExp.andR
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_near_even = (io.roundingMode === round_near_even)
val roundingMode_minMag = (io.roundingMode === round_minMag)
val roundingMode_min = (io.roundingMode === round_min)
val roundingMode_max = (io.roundingMode === round_max)
val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag)
val roundingMode_odd = (io.roundingMode === round_odd)
/*------------------------------------------------------------------------
| Assuming the input floating-point value is not a NaN, its magnitude is
| at least 1, and it is not obviously so large as to lead to overflow,
| convert its significand to fixed-point (i.e., with the binary point in a
| fixed location). For a non-NaN input with a magnitude less than 1, this
| expression contrives to ensure that the integer bits of 'alignedSig'
| will all be zeros.
*------------------------------------------------------------------------*/
val shiftedSig =
(magGeOne ## rawIn.sig(sigWidth - 2, 0))<<
Mux(magGeOne,
rawIn.sExp(min(expWidth - 2, log2Up(intWidth) - 1), 0),
0.U
)
val alignedSig =
(shiftedSig>>(sigWidth - 2)) ## shiftedSig(sigWidth - 3, 0).orR
val unroundedInt = 0.U(intWidth.W) | alignedSig>>2
val common_inexact = Mux(magGeOne, alignedSig(1, 0).orR, !rawIn.isZero)
val roundIncr_near_even =
(magGeOne && (alignedSig(2, 1).andR || alignedSig(1, 0).andR)) ||
(magJustBelowOne && alignedSig(1, 0).orR)
val roundIncr_near_maxMag = (magGeOne && alignedSig(1)) || magJustBelowOne
val roundIncr =
(roundingMode_near_even && roundIncr_near_even ) ||
(roundingMode_near_maxMag && roundIncr_near_maxMag) ||
((roundingMode_min || roundingMode_odd) &&
(rawIn.sign && common_inexact)) ||
(roundingMode_max && (!rawIn.sign && common_inexact))
val complUnroundedInt = Mux(rawIn.sign, ~unroundedInt, unroundedInt)
val roundedInt =
Mux(roundIncr ^ rawIn.sign,
complUnroundedInt + 1.U,
complUnroundedInt
) | (roundingMode_odd && common_inexact)
val magGeOne_atOverflowEdge = (posExp === (intWidth - 1).U)
//*** CHANGE TO TAKE BITS FROM THE ORIGINAL 'rawIn.sig' INSTEAD OF FROM
//*** 'unroundedInt'?:
val roundCarryBut2 = unroundedInt(intWidth - 3, 0).andR && roundIncr
val common_overflow =
Mux(magGeOne,
(posExp >= intWidth.U) ||
Mux(io.signedOut,
Mux(rawIn.sign,
magGeOne_atOverflowEdge &&
(unroundedInt(intWidth - 2, 0).orR || roundIncr),
magGeOne_atOverflowEdge ||
((posExp === (intWidth - 2).U) && roundCarryBut2)
),
rawIn.sign ||
(magGeOne_atOverflowEdge &&
unroundedInt(intWidth - 2) && roundCarryBut2)
),
!io.signedOut && rawIn.sign && roundIncr
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val invalidExc = rawIn.isNaN || rawIn.isInf
val overflow = !invalidExc && common_overflow
val inexact = !invalidExc && !common_overflow && common_inexact
val excSign = !rawIn.isNaN && rawIn.sign
val excOut =
Mux((io.signedOut === excSign),
(BigInt(1)<<(intWidth - 1)).U,
0.U
) |
Mux(!excSign, ((BigInt(1)<<(intWidth - 1)) - 1).U, 0.U)
io.out := Mux(invalidExc || common_overflow, excOut, roundedInt)
io.intExceptionFlags := invalidExc ## overflow ## inexact
}
File rawFloatFromRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
/*----------------------------------------------------------------------------
| In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be
| set.
*----------------------------------------------------------------------------*/
object rawFloatFromRecFN
{
def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat =
{
val exp = in(expWidth + sigWidth - 1, sigWidth - 1)
val isZero = exp(expWidth, expWidth - 2) === 0.U
val isSpecial = exp(expWidth, expWidth - 1) === 3.U
val out = Wire(new RawFloat(expWidth, sigWidth))
out.isNaN := isSpecial && exp(expWidth - 2)
out.isInf := isSpecial && ! exp(expWidth - 2)
out.isZero := isZero
out.sign := in(expWidth + sigWidth)
out.sExp := exp.zext
out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0)
out
}
}
| module RecFNToIN_e11_s53_i32_2( // @[RecFNToIN.scala:46:7]
input clock, // @[RecFNToIN.scala:46:7]
input reset, // @[RecFNToIN.scala:46:7]
input [64:0] io_in, // @[RecFNToIN.scala:49:16]
input [2:0] io_roundingMode, // @[RecFNToIN.scala:49:16]
input io_signedOut, // @[RecFNToIN.scala:49:16]
output [2:0] io_intExceptionFlags // @[RecFNToIN.scala:49:16]
);
wire [64:0] io_in_0 = io_in; // @[RecFNToIN.scala:46:7]
wire [2:0] io_roundingMode_0 = io_roundingMode; // @[RecFNToIN.scala:46:7]
wire io_signedOut_0 = io_signedOut; // @[RecFNToIN.scala:46:7]
wire [31:0] _io_out_T_1; // @[RecFNToIN.scala:145:18]
wire [2:0] _io_intExceptionFlags_T_1; // @[RecFNToIN.scala:146:52]
wire [31:0] io_out; // @[RecFNToIN.scala:46:7]
wire [2:0] io_intExceptionFlags_0; // @[RecFNToIN.scala:46:7]
wire [11:0] rawIn_exp = io_in_0[63:52]; // @[rawFloatFromRecFN.scala:51:21]
wire [2:0] _rawIn_isZero_T = rawIn_exp[11:9]; // @[rawFloatFromRecFN.scala:51:21, :52:28]
wire rawIn_isZero = _rawIn_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}]
wire rawIn_isZero_0 = rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :55:23]
wire [1:0] _rawIn_isSpecial_T = rawIn_exp[11:10]; // @[rawFloatFromRecFN.scala:51:21, :53:28]
wire rawIn_isSpecial = &_rawIn_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}]
wire _rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33]
wire _rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33]
wire _rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:59:25]
wire [12:0] _rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27]
wire [53:0] _rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44]
wire rawIn_isNaN; // @[rawFloatFromRecFN.scala:55:23]
wire rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire rawIn_sign; // @[rawFloatFromRecFN.scala:55:23]
wire [12:0] rawIn_sExp; // @[rawFloatFromRecFN.scala:55:23]
wire [53:0] rawIn_sig; // @[rawFloatFromRecFN.scala:55:23]
wire _rawIn_out_isNaN_T = rawIn_exp[9]; // @[rawFloatFromRecFN.scala:51:21, :56:41]
wire _rawIn_out_isInf_T = rawIn_exp[9]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41]
assign _rawIn_out_isNaN_T_1 = rawIn_isSpecial & _rawIn_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}]
assign rawIn_isNaN = _rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33]
wire _rawIn_out_isInf_T_1 = ~_rawIn_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}]
assign _rawIn_out_isInf_T_2 = rawIn_isSpecial & _rawIn_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}]
assign rawIn_isInf = _rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33]
assign _rawIn_out_sign_T = io_in_0[64]; // @[rawFloatFromRecFN.scala:59:25]
assign rawIn_sign = _rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25]
assign _rawIn_out_sExp_T = {1'h0, rawIn_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27]
assign rawIn_sExp = _rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire _rawIn_out_sig_T = ~rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :61:35]
wire [1:0] _rawIn_out_sig_T_1 = {1'h0, _rawIn_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}]
wire [51:0] _rawIn_out_sig_T_2 = io_in_0[51:0]; // @[rawFloatFromRecFN.scala:61:49]
assign _rawIn_out_sig_T_3 = {_rawIn_out_sig_T_1, _rawIn_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}]
assign rawIn_sig = _rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44]
wire magGeOne = rawIn_sExp[11]; // @[rawFloatFromRecFN.scala:55:23]
wire [10:0] posExp = rawIn_sExp[10:0]; // @[rawFloatFromRecFN.scala:55:23]
wire _magJustBelowOne_T = ~magGeOne; // @[RecFNToIN.scala:61:30, :63:27]
wire _magJustBelowOne_T_1 = &posExp; // @[RecFNToIN.scala:62:28, :63:47]
wire magJustBelowOne = _magJustBelowOne_T & _magJustBelowOne_T_1; // @[RecFNToIN.scala:63:{27,37,47}]
wire roundingMode_near_even = io_roundingMode_0 == 3'h0; // @[rawFloatFromRecFN.scala:52:53]
wire roundingMode_minMag = io_roundingMode_0 == 3'h1; // @[RecFNToIN.scala:46:7, :68:53]
wire roundingMode_min = io_roundingMode_0 == 3'h2; // @[RecFNToIN.scala:46:7, :69:53]
wire roundingMode_max = io_roundingMode_0 == 3'h3; // @[RecFNToIN.scala:46:7, :70:53]
wire roundingMode_near_maxMag = io_roundingMode_0 == 3'h4; // @[RecFNToIN.scala:46:7, :71:53]
wire roundingMode_odd = io_roundingMode_0 == 3'h6; // @[RecFNToIN.scala:46:7, :72:53]
wire [51:0] _shiftedSig_T = rawIn_sig[51:0]; // @[rawFloatFromRecFN.scala:55:23]
wire [52:0] _shiftedSig_T_1 = {magGeOne, _shiftedSig_T}; // @[RecFNToIN.scala:61:30, :83:{19,31}]
wire [4:0] _shiftedSig_T_2 = rawIn_sExp[4:0]; // @[rawFloatFromRecFN.scala:55:23]
wire [4:0] _shiftedSig_T_3 = magGeOne ? _shiftedSig_T_2 : 5'h0; // @[RecFNToIN.scala:61:30, :84:16, :85:27]
wire [83:0] shiftedSig = {31'h0, _shiftedSig_T_1} << _shiftedSig_T_3; // @[RecFNToIN.scala:83:{19,49}, :84:16]
wire [32:0] _alignedSig_T = shiftedSig[83:51]; // @[RecFNToIN.scala:83:49, :89:20]
wire [50:0] _alignedSig_T_1 = shiftedSig[50:0]; // @[RecFNToIN.scala:83:49, :89:51]
wire _alignedSig_T_2 = |_alignedSig_T_1; // @[RecFNToIN.scala:89:{51,69}]
wire [33:0] alignedSig = {_alignedSig_T, _alignedSig_T_2}; // @[RecFNToIN.scala:89:{20,38,69}]
wire [31:0] _unroundedInt_T = alignedSig[33:2]; // @[RecFNToIN.scala:89:38, :90:52]
wire [31:0] unroundedInt = _unroundedInt_T; // @[RecFNToIN.scala:90:{40,52}]
wire [1:0] _common_inexact_T = alignedSig[1:0]; // @[RecFNToIN.scala:89:38, :92:50]
wire [1:0] _roundIncr_near_even_T_2 = alignedSig[1:0]; // @[RecFNToIN.scala:89:38, :92:50, :94:64]
wire [1:0] _roundIncr_near_even_T_6 = alignedSig[1:0]; // @[RecFNToIN.scala:89:38, :92:50, :95:39]
wire _common_inexact_T_1 = |_common_inexact_T; // @[RecFNToIN.scala:92:{50,57}]
wire _common_inexact_T_2 = ~rawIn_isZero_0; // @[rawFloatFromRecFN.scala:55:23]
wire common_inexact = magGeOne ? _common_inexact_T_1 : _common_inexact_T_2; // @[RecFNToIN.scala:61:30, :92:{29,57,62}]
wire [1:0] _roundIncr_near_even_T = alignedSig[2:1]; // @[RecFNToIN.scala:89:38, :94:39]
wire _roundIncr_near_even_T_1 = &_roundIncr_near_even_T; // @[RecFNToIN.scala:94:{39,46}]
wire _roundIncr_near_even_T_3 = &_roundIncr_near_even_T_2; // @[RecFNToIN.scala:94:{64,71}]
wire _roundIncr_near_even_T_4 = _roundIncr_near_even_T_1 | _roundIncr_near_even_T_3; // @[RecFNToIN.scala:94:{46,51,71}]
wire _roundIncr_near_even_T_5 = magGeOne & _roundIncr_near_even_T_4; // @[RecFNToIN.scala:61:30, :94:{25,51}]
wire _roundIncr_near_even_T_7 = |_roundIncr_near_even_T_6; // @[RecFNToIN.scala:95:{39,46}]
wire _roundIncr_near_even_T_8 = magJustBelowOne & _roundIncr_near_even_T_7; // @[RecFNToIN.scala:63:37, :95:{26,46}]
wire roundIncr_near_even = _roundIncr_near_even_T_5 | _roundIncr_near_even_T_8; // @[RecFNToIN.scala:94:{25,78}, :95:26]
wire _roundIncr_near_maxMag_T = alignedSig[1]; // @[RecFNToIN.scala:89:38, :96:56]
wire _roundIncr_near_maxMag_T_1 = magGeOne & _roundIncr_near_maxMag_T; // @[RecFNToIN.scala:61:30, :96:{43,56}]
wire roundIncr_near_maxMag = _roundIncr_near_maxMag_T_1 | magJustBelowOne; // @[RecFNToIN.scala:63:37, :96:{43,61}]
wire _roundIncr_T = roundingMode_near_even & roundIncr_near_even; // @[RecFNToIN.scala:67:53, :94:78, :98:35]
wire _roundIncr_T_1 = roundingMode_near_maxMag & roundIncr_near_maxMag; // @[RecFNToIN.scala:71:53, :96:61, :99:35]
wire _roundIncr_T_2 = _roundIncr_T | _roundIncr_T_1; // @[RecFNToIN.scala:98:{35,61}, :99:35]
wire _roundIncr_T_3 = roundingMode_min | roundingMode_odd; // @[RecFNToIN.scala:69:53, :72:53, :100:28]
wire _roundIncr_T_4 = rawIn_sign & common_inexact; // @[rawFloatFromRecFN.scala:55:23]
wire _roundIncr_T_5 = _roundIncr_T_3 & _roundIncr_T_4; // @[RecFNToIN.scala:100:{28,49}, :101:26]
wire _roundIncr_T_6 = _roundIncr_T_2 | _roundIncr_T_5; // @[RecFNToIN.scala:98:61, :99:61, :100:49]
wire _roundIncr_T_7 = ~rawIn_sign; // @[rawFloatFromRecFN.scala:55:23]
wire _roundIncr_T_8 = _roundIncr_T_7 & common_inexact; // @[RecFNToIN.scala:92:29, :102:{31,43}]
wire _roundIncr_T_9 = roundingMode_max & _roundIncr_T_8; // @[RecFNToIN.scala:70:53, :102:{27,43}]
wire roundIncr = _roundIncr_T_6 | _roundIncr_T_9; // @[RecFNToIN.scala:99:61, :101:46, :102:27]
wire [31:0] _complUnroundedInt_T = ~unroundedInt; // @[RecFNToIN.scala:90:40, :103:45]
wire [31:0] complUnroundedInt = rawIn_sign ? _complUnroundedInt_T : unroundedInt; // @[rawFloatFromRecFN.scala:55:23]
wire _roundedInt_T = roundIncr ^ rawIn_sign; // @[rawFloatFromRecFN.scala:55:23]
wire [32:0] _roundedInt_T_1 = {1'h0, complUnroundedInt} + 33'h1; // @[RecFNToIN.scala:103:32, :106:31]
wire [31:0] _roundedInt_T_2 = _roundedInt_T_1[31:0]; // @[RecFNToIN.scala:106:31]
wire [31:0] _roundedInt_T_3 = _roundedInt_T ? _roundedInt_T_2 : complUnroundedInt; // @[RecFNToIN.scala:103:32, :105:{12,23}, :106:31]
wire _roundedInt_T_4 = roundingMode_odd & common_inexact; // @[RecFNToIN.scala:72:53, :92:29, :108:31]
wire [31:0] roundedInt = {_roundedInt_T_3[31:1], _roundedInt_T_3[0] | _roundedInt_T_4}; // @[RecFNToIN.scala:105:12, :108:{11,31}]
wire magGeOne_atOverflowEdge = posExp == 11'h1F; // @[RecFNToIN.scala:62:28, :110:43]
wire [29:0] _roundCarryBut2_T = unroundedInt[29:0]; // @[RecFNToIN.scala:90:40, :113:38]
wire _roundCarryBut2_T_1 = &_roundCarryBut2_T; // @[RecFNToIN.scala:113:{38,56}]
wire roundCarryBut2 = _roundCarryBut2_T_1 & roundIncr; // @[RecFNToIN.scala:101:46, :113:{56,61}]
wire _common_overflow_T = |(posExp[10:5]); // @[RecFNToIN.scala:62:28, :116:21]
wire [30:0] _common_overflow_T_1 = unroundedInt[30:0]; // @[RecFNToIN.scala:90:40, :120:42]
wire _common_overflow_T_2 = |_common_overflow_T_1; // @[RecFNToIN.scala:120:{42,60}]
wire _common_overflow_T_3 = _common_overflow_T_2 | roundIncr; // @[RecFNToIN.scala:101:46, :120:{60,64}]
wire _common_overflow_T_4 = magGeOne_atOverflowEdge & _common_overflow_T_3; // @[RecFNToIN.scala:110:43, :119:49, :120:64]
wire _common_overflow_T_5 = posExp == 11'h1E; // @[RecFNToIN.scala:62:28, :122:38]
wire _common_overflow_T_6 = _common_overflow_T_5 & roundCarryBut2; // @[RecFNToIN.scala:113:61, :122:{38,60}]
wire _common_overflow_T_7 = magGeOne_atOverflowEdge | _common_overflow_T_6; // @[RecFNToIN.scala:110:43, :121:49, :122:60]
wire _common_overflow_T_8 = rawIn_sign ? _common_overflow_T_4 : _common_overflow_T_7; // @[rawFloatFromRecFN.scala:55:23]
wire _common_overflow_T_9 = unroundedInt[30]; // @[RecFNToIN.scala:90:40, :126:42]
wire _common_overflow_T_10 = magGeOne_atOverflowEdge & _common_overflow_T_9; // @[RecFNToIN.scala:110:43, :125:50, :126:42]
wire _common_overflow_T_11 = _common_overflow_T_10 & roundCarryBut2; // @[RecFNToIN.scala:113:61, :125:50, :126:57]
wire _common_overflow_T_12 = rawIn_sign | _common_overflow_T_11; // @[rawFloatFromRecFN.scala:55:23]
wire _common_overflow_T_13 = io_signedOut_0 ? _common_overflow_T_8 : _common_overflow_T_12; // @[RecFNToIN.scala:46:7, :117:20, :118:24, :124:32]
wire _common_overflow_T_14 = _common_overflow_T | _common_overflow_T_13; // @[RecFNToIN.scala:116:{21,36}, :117:20]
wire _common_overflow_T_15 = ~io_signedOut_0; // @[RecFNToIN.scala:46:7, :128:13]
wire _common_overflow_T_16 = _common_overflow_T_15 & rawIn_sign; // @[rawFloatFromRecFN.scala:55:23]
wire _common_overflow_T_17 = _common_overflow_T_16 & roundIncr; // @[RecFNToIN.scala:101:46, :128:{27,41}]
wire common_overflow = magGeOne ? _common_overflow_T_14 : _common_overflow_T_17; // @[RecFNToIN.scala:61:30, :115:12, :116:36, :128:41]
wire invalidExc = rawIn_isNaN | rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire _overflow_T = ~invalidExc; // @[RecFNToIN.scala:133:34, :134:20]
wire overflow = _overflow_T & common_overflow; // @[RecFNToIN.scala:115:12, :134:{20,32}]
wire _inexact_T = ~invalidExc; // @[RecFNToIN.scala:133:34, :134:20, :135:20]
wire _inexact_T_1 = ~common_overflow; // @[RecFNToIN.scala:115:12, :135:35]
wire _inexact_T_2 = _inexact_T & _inexact_T_1; // @[RecFNToIN.scala:135:{20,32,35}]
wire inexact = _inexact_T_2 & common_inexact; // @[RecFNToIN.scala:92:29, :135:{32,52}]
wire _excSign_T = ~rawIn_isNaN; // @[rawFloatFromRecFN.scala:55:23]
wire excSign = _excSign_T & rawIn_sign; // @[rawFloatFromRecFN.scala:55:23]
wire _excOut_T = io_signedOut_0 == excSign; // @[RecFNToIN.scala:46:7, :137:32, :139:27]
wire [31:0] _excOut_T_1 = {_excOut_T, 31'h0}; // @[RecFNToIN.scala:139:{12,27}]
wire _excOut_T_2 = ~excSign; // @[RecFNToIN.scala:137:32, :143:13]
wire [30:0] _excOut_T_3 = {31{_excOut_T_2}}; // @[RecFNToIN.scala:143:{12,13}]
wire [31:0] excOut = {_excOut_T_1[31], _excOut_T_1[30:0] | _excOut_T_3}; // @[RecFNToIN.scala:139:12, :142:11, :143:12]
wire _io_out_T = invalidExc | common_overflow; // @[RecFNToIN.scala:115:12, :133:34, :145:30]
assign _io_out_T_1 = _io_out_T ? excOut : roundedInt; // @[RecFNToIN.scala:108:11, :142:11, :145:{18,30}]
assign io_out = _io_out_T_1; // @[RecFNToIN.scala:46:7, :145:18]
wire [1:0] _io_intExceptionFlags_T = {invalidExc, overflow}; // @[RecFNToIN.scala:133:34, :134:32, :146:40]
assign _io_intExceptionFlags_T_1 = {_io_intExceptionFlags_T, inexact}; // @[RecFNToIN.scala:135:52, :146:{40,52}]
assign io_intExceptionFlags_0 = _io_intExceptionFlags_T_1; // @[RecFNToIN.scala:46:7, :146:52]
assign io_intExceptionFlags = io_intExceptionFlags_0; // @[RecFNToIN.scala:46:7]
endmodule |
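The RecFNToIN converter above is purely combinational: it takes a recoded floating-point value, a rounding mode, and a signed/unsigned select, and produces the integer result plus {invalid, overflow, inexact} flags. As a minimal, illustrative sketch (the wrapper name and ports below are assumptions, not part of the generated output), it could be instantiated from Chisel like this to convert a recoded double to a signed 32-bit integer using round-to-nearest-even:
import chisel3._
import hardfloat._

class RecF64ToInt32 extends Module {
  val io = IO(new Bundle {
    val in    = Input(UInt(65.W))   // recoded (not IEEE-packed) double
    val out   = Output(UInt(32.W))
    val flags = Output(UInt(3.W))   // {invalid, overflow, inexact}
  })
  val conv = Module(new RecFNToIN(expWidth = 11, sigWidth = 53, intWidth = 32))
  conv.io.in           := io.in
  conv.io.roundingMode := consts.round_near_even
  conv.io.signedOut    := true.B
  io.out   := conv.io.out.asUInt
  io.flags := conv.io.intExceptionFlags.asUInt
}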
Generate the Verilog code corresponding to the following Chisel files.
File Serdes.scala:
package testchipip.serdes
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy._
import org.chipsalliance.cde.config._
class GenericSerializer[T <: Data](t: T, flitWidth: Int) extends Module {
override def desiredName = s"GenericSerializer_${t.typeName}w${t.getWidth}_f${flitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(t))
val out = Decoupled(new Flit(flitWidth))
val busy = Output(Bool())
})
val dataBits = t.getWidth.max(flitWidth)
val dataBeats = (dataBits - 1) / flitWidth + 1
require(dataBeats >= 1)
val data = Reg(Vec(dataBeats, UInt(flitWidth.W)))
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
io.in.ready := io.out.ready && beat === 0.U
io.out.valid := io.in.valid || beat =/= 0.U
io.out.bits.flit := Mux(beat === 0.U, io.in.bits.asUInt, data(beat))
when (io.out.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
when (beat === 0.U) {
data := io.in.bits.asTypeOf(Vec(dataBeats, UInt(flitWidth.W)))
data(0) := DontCare // unused, DCE this
}
}
io.busy := io.out.valid
}
class GenericDeserializer[T <: Data](t: T, flitWidth: Int) extends Module {
override def desiredName = s"GenericDeserializer_${t.typeName}w${t.getWidth}_f${flitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(t)
val busy = Output(Bool())
})
val dataBits = t.getWidth.max(flitWidth)
val dataBeats = (dataBits - 1) / flitWidth + 1
require(dataBeats >= 1)
val data = Reg(Vec(dataBeats-1, UInt(flitWidth.W)))
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
io.in.ready := io.out.ready || beat =/= (dataBeats-1).U
io.out.valid := io.in.valid && beat === (dataBeats-1).U
io.out.bits := (if (dataBeats == 1) {
io.in.bits.flit.asTypeOf(t)
} else {
Cat(io.in.bits.flit, data.asUInt).asTypeOf(t)
})
when (io.in.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
if (dataBeats > 1) {
when (beat =/= (dataBeats-1).U) {
data(beat(log2Ceil(dataBeats-1)-1,0)) := io.in.bits.flit
}
}
}
io.busy := beat =/= 0.U
}
class FlitToPhit(flitWidth: Int, phitWidth: Int) extends Module {
override def desiredName = s"FlitToPhit_f${flitWidth}_p${phitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(new Phit(phitWidth))
})
require(flitWidth >= phitWidth)
val dataBeats = (flitWidth - 1) / phitWidth + 1
val data = Reg(Vec(dataBeats-1, UInt(phitWidth.W)))
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
io.in.ready := io.out.ready && beat === 0.U
io.out.valid := io.in.valid || beat =/= 0.U
io.out.bits.phit := (if (dataBeats == 1) io.in.bits.flit else Mux(beat === 0.U, io.in.bits.flit, data(beat-1.U)))
when (io.out.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
when (beat === 0.U) {
data := io.in.bits.asTypeOf(Vec(dataBeats, UInt(phitWidth.W))).tail
}
}
}
object FlitToPhit {
def apply(flit: DecoupledIO[Flit], phitWidth: Int): DecoupledIO[Phit] = {
val flit2phit = Module(new FlitToPhit(flit.bits.flitWidth, phitWidth))
flit2phit.io.in <> flit
flit2phit.io.out
}
}
class PhitToFlit(flitWidth: Int, phitWidth: Int) extends Module {
override def desiredName = s"PhitToFlit_p${phitWidth}_f${flitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Phit(phitWidth)))
val out = Decoupled(new Flit(flitWidth))
})
require(flitWidth >= phitWidth)
val dataBeats = (flitWidth - 1) / phitWidth + 1
val data = Reg(Vec(dataBeats-1, UInt(phitWidth.W)))
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
io.in.ready := io.out.ready || beat =/= (dataBeats-1).U
io.out.valid := io.in.valid && beat === (dataBeats-1).U
io.out.bits.flit := (if (dataBeats == 1) io.in.bits.phit else Cat(io.in.bits.phit, data.asUInt))
when (io.in.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
if (dataBeats > 1) {
when (beat =/= (dataBeats-1).U) {
data(beat) := io.in.bits.phit
}
}
}
}
object PhitToFlit {
def apply(phit: DecoupledIO[Phit], flitWidth: Int): DecoupledIO[Flit] = {
val phit2flit = Module(new PhitToFlit(flitWidth, phit.bits.phitWidth))
phit2flit.io.in <> phit
phit2flit.io.out
}
def apply(phit: ValidIO[Phit], flitWidth: Int): ValidIO[Flit] = {
val phit2flit = Module(new PhitToFlit(flitWidth, phit.bits.phitWidth))
phit2flit.io.in.valid := phit.valid
phit2flit.io.in.bits := phit.bits
when (phit.valid) { assert(phit2flit.io.in.ready) }
val out = Wire(Valid(new Flit(flitWidth)))
out.valid := phit2flit.io.out.valid
out.bits := phit2flit.io.out.bits
phit2flit.io.out.ready := true.B
out
}
}
class PhitArbiter(phitWidth: Int, flitWidth: Int, channels: Int) extends Module {
override def desiredName = s"PhitArbiter_p${phitWidth}_f${flitWidth}_n${channels}"
val io = IO(new Bundle {
val in = Flipped(Vec(channels, Decoupled(new Phit(phitWidth))))
val out = Decoupled(new Phit(phitWidth))
})
if (channels == 1) {
io.out <> io.in(0)
} else {
val headerWidth = log2Ceil(channels)
val headerBeats = (headerWidth - 1) / phitWidth + 1
val flitBeats = (flitWidth - 1) / phitWidth + 1
val beats = headerBeats + flitBeats
val beat = RegInit(0.U(log2Ceil(beats).W))
val chosen_reg = Reg(UInt(headerWidth.W))
val chosen_prio = PriorityEncoder(io.in.map(_.valid))
val chosen = Mux(beat === 0.U, chosen_prio, chosen_reg)
val header_idx = if (headerBeats == 1) 0.U else beat(log2Ceil(headerBeats)-1,0)
io.out.valid := VecInit(io.in.map(_.valid))(chosen)
io.out.bits.phit := Mux(beat < headerBeats.U,
chosen.asTypeOf(Vec(headerBeats, UInt(phitWidth.W)))(header_idx),
VecInit(io.in.map(_.bits.phit))(chosen))
for (i <- 0 until channels) {
io.in(i).ready := io.out.ready && beat >= headerBeats.U && chosen_reg === i.U
}
when (io.out.fire) {
beat := Mux(beat === (beats-1).U, 0.U, beat + 1.U)
when (beat === 0.U) { chosen_reg := chosen_prio }
}
}
}
class PhitDemux(phitWidth: Int, flitWidth: Int, channels: Int) extends Module {
override def desiredName = s"PhitDemux_p${phitWidth}_f${flitWidth}_n${channels}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Phit(phitWidth)))
val out = Vec(channels, Decoupled(new Phit(phitWidth)))
})
if (channels == 1) {
io.out(0) <> io.in
} else {
val headerWidth = log2Ceil(channels)
val headerBeats = (headerWidth - 1) / phitWidth + 1
val flitBeats = (flitWidth - 1) / phitWidth + 1
val beats = headerBeats + flitBeats
val beat = RegInit(0.U(log2Ceil(beats).W))
val channel_vec = Reg(Vec(headerBeats, UInt(phitWidth.W)))
val channel = channel_vec.asUInt(log2Ceil(channels)-1,0)
val header_idx = if (headerBeats == 1) 0.U else beat(log2Ceil(headerBeats)-1,0)
io.in.ready := beat < headerBeats.U || VecInit(io.out.map(_.ready))(channel)
for (c <- 0 until channels) {
io.out(c).valid := io.in.valid && beat >= headerBeats.U && channel === c.U
io.out(c).bits.phit := io.in.bits.phit
}
when (io.in.fire) {
beat := Mux(beat === (beats-1).U, 0.U, beat + 1.U)
when (beat < headerBeats.U) {
channel_vec(header_idx) := io.in.bits.phit
}
}
}
}
class DecoupledFlitToCreditedFlit(flitWidth: Int, bufferSz: Int) extends Module {
override def desiredName = s"DecoupledFlitToCreditedFlit_f${flitWidth}_b${bufferSz}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(new Flit(flitWidth))
val credit = Flipped(Decoupled(new Flit(flitWidth)))
})
val creditWidth = log2Ceil(bufferSz)
require(creditWidth <= flitWidth)
val credits = RegInit(0.U((creditWidth+1).W))
val credit_incr = io.out.fire
val credit_decr = io.credit.fire
when (credit_incr || credit_decr) {
credits := credits + credit_incr - Mux(io.credit.valid, io.credit.bits.flit +& 1.U, 0.U)
}
io.out.valid := io.in.valid && credits < bufferSz.U
io.out.bits.flit := io.in.bits.flit
io.in.ready := io.out.ready && credits < bufferSz.U
io.credit.ready := true.B
}
class CreditedFlitToDecoupledFlit(flitWidth: Int, bufferSz: Int) extends Module {
override def desiredName = s"CreditedFlitToDecoupledFlit_f${flitWidth}_b${bufferSz}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(new Flit(flitWidth))
val credit = Decoupled(new Flit(flitWidth))
})
val creditWidth = log2Ceil(bufferSz)
require(creditWidth <= flitWidth)
val buffer = Module(new Queue(new Flit(flitWidth), bufferSz))
val credits = RegInit(0.U((creditWidth+1).W))
val credit_incr = buffer.io.deq.fire
val credit_decr = io.credit.fire
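  // credits accumulates flits drained from the buffer since the last credit return; it
  // is sent back encoded as (count - 1) on io.credit and restarts from the current
  // dequeue when that send fires.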
when (credit_incr || credit_decr) {
credits := credit_incr + Mux(credit_decr, 0.U, credits)
}
buffer.io.enq.valid := io.in.valid
buffer.io.enq.bits := io.in.bits
io.in.ready := true.B
when (io.in.valid) { assert(buffer.io.enq.ready) }
io.out <> buffer.io.deq
io.credit.valid := credits =/= 0.U
io.credit.bits.flit := credits - 1.U
}
}
| module GenericSerializer_TLBeatw88_f32( // @[Serdes.scala:8:7]
input clock, // @[Serdes.scala:8:7]
input reset, // @[Serdes.scala:8:7]
output io_in_ready, // @[Serdes.scala:10:14]
input io_in_valid, // @[Serdes.scala:10:14]
input [85:0] io_in_bits_payload, // @[Serdes.scala:10:14]
input io_in_bits_head, // @[Serdes.scala:10:14]
input io_in_bits_tail, // @[Serdes.scala:10:14]
input io_out_ready, // @[Serdes.scala:10:14]
output io_out_valid, // @[Serdes.scala:10:14]
output [31:0] io_out_bits_flit // @[Serdes.scala:10:14]
);
wire [0:0][31:0] _GEN = '{32'h0};
reg [31:0] data_1; // @[Serdes.scala:19:17]
reg [31:0] data_2; // @[Serdes.scala:19:17]
reg [1:0] beat; // @[Serdes.scala:20:21]
wire _io_out_bits_flit_T = beat == 2'h0; // @[Serdes.scala:8:7, :20:21, :22:39]
wire [2:0] _GEN_0 = {io_in_valid, beat}; // @[Serdes.scala:20:21, :23:{31,39}]
wire [3:0][31:0] _GEN_1 = {_GEN, {{data_2}, {data_1}, {32'h0}}}; // @[Serdes.scala:19:17, :24:26]
wire _GEN_2 = io_out_ready & (|_GEN_0); // @[Decoupled.scala:51:35]
always @(posedge clock) begin // @[Serdes.scala:8:7]
if (_GEN_2 & _io_out_bits_flit_T) begin // @[Decoupled.scala:51:35]
data_1 <= io_in_bits_payload[61:30]; // @[Serdes.scala:19:17, :29:34]
data_2 <= {8'h0, io_in_bits_payload[85:62]}; // @[Serdes.scala:19:17, :29:34]
end
if (reset) // @[Serdes.scala:8:7]
beat <= 2'h0; // @[Serdes.scala:8:7, :20:21]
else if (_GEN_2) // @[Decoupled.scala:51:35]
beat <= beat == 2'h2 ? 2'h0 : beat + 2'h1; // @[Serdes.scala:8:7, :20:21, :27:{16,22,53}]
always @(posedge) |
Generate the Verilog code corresponding to the following Chisel files.
File DescribedSRAM.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3.{Data, SyncReadMem, Vec}
import chisel3.util.log2Ceil
object DescribedSRAM {
def apply[T <: Data](
name: String,
desc: String,
size: BigInt, // depth
data: T
): SyncReadMem[T] = {
val mem = SyncReadMem(size, data)
mem.suggestName(name)
val granWidth = data match {
case v: Vec[_] => v.head.getWidth
case d => d.getWidth
}
val uid = 0
Annotated.srams(
component = mem,
name = name,
address_width = log2Ceil(size),
data_width = data.getWidth,
depth = size,
description = desc,
write_mask_granularity = granWidth
)
mem
}
}
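// Illustrative sketch (not part of the original file): one way DescribedSRAM might be
// used to build a 256 x 64-bit synchronous-read memory. The module and port names
// below are invented for the example.
import chisel3._
class DescribedSRAMExample extends Module {
  val io = IO(new Bundle {
    val addr  = Input(UInt(8.W))
    val wen   = Input(Bool())
    val wdata = Input(UInt(64.W))
    val rdata = Output(UInt(64.W))
  })
  val mem = DescribedSRAM(
    name = "example_sram",
    desc = "illustrative 256x64 SRAM",
    size = 256,
    data = UInt(64.W)
  )
  when (io.wen) { mem.write(io.addr, io.wdata) }
  io.rdata := mem.read(io.addr, !io.wen)
}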
| module dataArrayB1Way_3_0( // @[DescribedSRAM.scala:17:26]
input [7:0] RW0_addr,
input RW0_en,
input RW0_clk,
input RW0_wmode,
input [63:0] RW0_wdata,
output [63:0] RW0_rdata
);
dataArrayB0Way_0_ext dataArrayB0Way_0_ext ( // @[DescribedSRAM.scala:17:26]
.RW0_addr (RW0_addr),
.RW0_en (RW0_en),
.RW0_clk (RW0_clk),
.RW0_wmode (RW0_wmode),
.RW0_wdata (RW0_wdata),
.RW0_rdata (RW0_rdata)
); // @[DescribedSRAM.scala:17:26]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
package constellation.channel
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy._
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.util._
import constellation.noc.{HasNoCParams}
class NoCMonitor(val cParam: ChannelParams)(implicit val p: Parameters) extends Module with HasNoCParams {
val io = IO(new Bundle {
val in = Input(new Channel(cParam))
})
val in_flight = RegInit(VecInit(Seq.fill(cParam.nVirtualChannels) { false.B }))
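  // For each virtual channel, in_flight is set when a head flit arrives and cleared by
  // the matching tail, letting the monitor flag packets whose head/tail framing overlaps.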
for (i <- 0 until cParam.srcSpeedup) {
val flit = io.in.flit(i)
when (flit.valid) {
when (flit.bits.head) {
in_flight(flit.bits.virt_channel_id) := true.B
assert (!in_flight(flit.bits.virt_channel_id), "Flit head/tail sequencing is broken")
}
when (flit.bits.tail) {
in_flight(flit.bits.virt_channel_id) := false.B
}
}
val possibleFlows = cParam.possibleFlows
when (flit.valid && flit.bits.head) {
cParam match {
case n: ChannelParams => n.virtualChannelParams.zipWithIndex.foreach { case (v,i) =>
assert(flit.bits.virt_channel_id =/= i.U || v.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR)
}
case _ => assert(cParam.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR)
}
}
}
}
File Types.scala:
package constellation.routing
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Parameters}
import constellation.noc.{HasNoCParams}
import constellation.channel.{Flit}
/** A representation for 1 specific virtual channel in wormhole routing
*
* @param src the source node
 * @param dst the destination node
 * @param vc ID for the virtual channel
* @param n_vc the number of virtual channels
*/
// BEGIN: ChannelRoutingInfo
case class ChannelRoutingInfo(
src: Int,
dst: Int,
vc: Int,
n_vc: Int
) {
// END: ChannelRoutingInfo
require (src >= -1 && dst >= -1 && vc >= 0, s"Illegal $this")
require (!(src == -1 && dst == -1), s"Illegal $this")
require (vc < n_vc, s"Illegal $this")
val isIngress = src == -1
val isEgress = dst == -1
}
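// Illustrative sketch (not part of the original file): example ChannelRoutingInfo
// values with arbitrary node and VC numbers. Ingress/egress channels use -1 for the
// missing endpoint, which is what isIngress/isEgress detect.
object ChannelRoutingInfoExamples {
  val physical = ChannelRoutingInfo(src = 2, dst = 3, vc = 1, n_vc = 4) // neither ingress nor egress
  val ingress = ChannelRoutingInfo(src = -1, dst = 3, vc = 0, n_vc = 1) // isIngress = true
}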
/** Represents the properties of a packet that are relevant for routing
 * ingressId and egressId uniquely identify a flow, but vNetId and the destination
 * node are used here to simplify the implementation of routing relations
*
* @param ingressId packet's source ingress point
* @param egressId packet's destination egress point
 * @param vNetId virtual subnetwork identifier
 * @param ingressNode node ID of the packet's ingress point
 * @param ingressNodeId index of that ingress among the ingresses at its node
 * @param egressNode packet's destination node ID
 * @param egressNodeId index of the egress among the egresses at the destination node
 * @param fifo whether the flow requires FIFO (in-order) delivery
*/
// BEGIN: FlowRoutingInfo
case class FlowRoutingInfo(
ingressId: Int,
egressId: Int,
vNetId: Int,
ingressNode: Int,
ingressNodeId: Int,
egressNode: Int,
egressNodeId: Int,
fifo: Boolean
) {
// END: FlowRoutingInfo
def isFlow(f: FlowRoutingBundle): Bool = {
(f.ingress_node === ingressNode.U &&
f.egress_node === egressNode.U &&
f.ingress_node_id === ingressNodeId.U &&
f.egress_node_id === egressNodeId.U)
}
def asLiteral(b: FlowRoutingBundle): BigInt = {
Seq(
(vNetId , b.vnet_id),
(ingressNode , b.ingress_node),
(ingressNodeId , b.ingress_node_id),
(egressNode , b.egress_node),
(egressNodeId , b.egress_node_id)
).foldLeft(0)((l, t) => {
(l << t._2.getWidth) | t._1
})
}
}
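// Illustrative sketch (not part of the original file): a flow from global ingress 0
// (the 0th ingress at node 1) to global egress 2 (the 1st egress at node 4); all
// numbers are arbitrary.
object FlowRoutingInfoExample {
  val flow = FlowRoutingInfo(
    ingressId = 0, egressId = 2, vNetId = 0,
    ingressNode = 1, ingressNodeId = 0,
    egressNode = 4, egressNodeId = 1,
    fifo = false)
}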
class FlowRoutingBundle(implicit val p: Parameters) extends Bundle with HasNoCParams {
// Instead of tracking ingress/egress ID, track the physical destination id and the offset at the destination
// This simplifies the routing tables
val vnet_id = UInt(log2Ceil(nVirtualNetworks).W)
val ingress_node = UInt(log2Ceil(nNodes).W)
val ingress_node_id = UInt(log2Ceil(maxIngressesAtNode).W)
val egress_node = UInt(log2Ceil(nNodes).W)
val egress_node_id = UInt(log2Ceil(maxEgressesAtNode).W)
}
| module NoCMonitor_41( // @[Monitor.scala:11:7]
input clock, // @[Monitor.scala:11:7]
input reset, // @[Monitor.scala:11:7]
input io_in_flit_0_valid, // @[Monitor.scala:12:14]
input io_in_flit_0_bits_head, // @[Monitor.scala:12:14]
input io_in_flit_0_bits_tail, // @[Monitor.scala:12:14]
input [4:0] io_in_flit_0_bits_flow_ingress_node, // @[Monitor.scala:12:14]
input [1:0] io_in_flit_0_bits_flow_ingress_node_id, // @[Monitor.scala:12:14]
input [4:0] io_in_flit_0_bits_flow_egress_node, // @[Monitor.scala:12:14]
input [1:0] io_in_flit_0_bits_flow_egress_node_id, // @[Monitor.scala:12:14]
input [2:0] io_in_flit_0_bits_virt_channel_id // @[Monitor.scala:12:14]
);
reg in_flight_0; // @[Monitor.scala:16:26]
reg in_flight_1; // @[Monitor.scala:16:26]
reg in_flight_2; // @[Monitor.scala:16:26]
reg in_flight_3; // @[Monitor.scala:16:26]
reg in_flight_4; // @[Monitor.scala:16:26]
reg in_flight_5; // @[Monitor.scala:16:26]
reg in_flight_6; // @[Monitor.scala:16:26]
reg in_flight_7; // @[Monitor.scala:16:26] |
Generate the Verilog code corresponding to the following Chisel files.
File InputUnit.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{FlowRoutingBundle}
import constellation.noc.{HasNoCParams}
class AbstractInputUnitIO(
val cParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams],
)(implicit val p: Parameters) extends Bundle with HasRouterOutputParams {
val nodeId = cParam.destId
val router_req = Decoupled(new RouteComputerReq)
val router_resp = Input(new RouteComputerResp(outParams, egressParams))
val vcalloc_req = Decoupled(new VCAllocReq(cParam, outParams, egressParams))
val vcalloc_resp = Input(new VCAllocResp(outParams, egressParams))
val out_credit_available = Input(MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) }))
val salloc_req = Vec(cParam.destSpeedup, Decoupled(new SwitchAllocReq(outParams, egressParams)))
val out = Vec(cParam.destSpeedup, Valid(new SwitchBundle(outParams, egressParams)))
val debug = Output(new Bundle {
val va_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
val sa_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
})
val block = Input(Bool())
}
abstract class AbstractInputUnit(
val cParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams]
)(implicit val p: Parameters) extends Module with HasRouterOutputParams with HasNoCParams {
val nodeId = cParam.destId
def io: AbstractInputUnitIO
}
class InputBuffer(cParam: ChannelParams)(implicit p: Parameters) extends Module {
val nVirtualChannels = cParam.nVirtualChannels
val io = IO(new Bundle {
val enq = Flipped(Vec(cParam.srcSpeedup, Valid(new Flit(cParam.payloadBits))))
val deq = Vec(cParam.nVirtualChannels, Decoupled(new BaseFlit(cParam.payloadBits)))
})
val useOutputQueues = cParam.useOutputQueues
val delims = if (useOutputQueues) {
cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize else 0).scanLeft(0)(_+_)
} else {
    // If there is no output queuing, add one extra slot per VC, since head == tail must mean empty
// TODO this should be fixed, should use all slots available
cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize + 1 else 0).scanLeft(0)(_+_)
}
val starts = delims.dropRight(1).zipWithIndex.map { case (s,i) =>
if (cParam.virtualChannelParams(i).traversable) s else 0
}
val ends = delims.tail.zipWithIndex.map { case (s,i) =>
if (cParam.virtualChannelParams(i).traversable) s else 0
}
val fullSize = delims.last
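  // delims carves one unified buffer into per-VC regions: VC i occupies slots
  // [starts(i), ends(i)). For example, traversable buffer sizes (2, 3) with output
  // queues give delims = (0, 2, 5), so VC0 uses slots 0-1 and VC1 uses slots 2-4.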
// Ugly case. Use multiple queues
if ((cParam.srcSpeedup > 1 || cParam.destSpeedup > 1 || fullSize <= 1) || !cParam.unifiedBuffer) {
require(useOutputQueues)
val qs = cParam.virtualChannelParams.map(v => Module(new Queue(new BaseFlit(cParam.payloadBits), v.bufferSize)))
qs.zipWithIndex.foreach { case (q,i) =>
val sel = io.enq.map(f => f.valid && f.bits.virt_channel_id === i.U)
q.io.enq.valid := sel.orR
q.io.enq.bits.head := Mux1H(sel, io.enq.map(_.bits.head))
q.io.enq.bits.tail := Mux1H(sel, io.enq.map(_.bits.tail))
q.io.enq.bits.payload := Mux1H(sel, io.enq.map(_.bits.payload))
io.deq(i) <> q.io.deq
}
} else {
val mem = Mem(fullSize, new BaseFlit(cParam.payloadBits))
val heads = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
val tails = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
val empty = (heads zip tails).map(t => t._1 === t._2)
val qs = Seq.fill(nVirtualChannels) { Module(new Queue(new BaseFlit(cParam.payloadBits), 1, pipe=true)) }
qs.foreach(_.io.enq.valid := false.B)
qs.foreach(_.io.enq.bits := DontCare)
val vc_sel = UIntToOH(io.enq(0).bits.virt_channel_id)
val flit = Wire(new BaseFlit(cParam.payloadBits))
val direct_to_q = (Mux1H(vc_sel, qs.map(_.io.enq.ready)) && Mux1H(vc_sel, empty)) && useOutputQueues.B
flit.head := io.enq(0).bits.head
flit.tail := io.enq(0).bits.tail
flit.payload := io.enq(0).bits.payload
when (io.enq(0).valid && !direct_to_q) {
val tail = tails(io.enq(0).bits.virt_channel_id)
mem.write(tail, flit)
tails(io.enq(0).bits.virt_channel_id) := Mux(
tail === Mux1H(vc_sel, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(vc_sel, starts.map(_.U)),
tail + 1.U)
} .elsewhen (io.enq(0).valid && direct_to_q) {
for (i <- 0 until nVirtualChannels) {
when (io.enq(0).bits.virt_channel_id === i.U) {
qs(i).io.enq.valid := true.B
qs(i).io.enq.bits := flit
}
}
}
if (useOutputQueues) {
val can_to_q = (0 until nVirtualChannels).map { i => !empty(i) && qs(i).io.enq.ready }
val to_q_oh = PriorityEncoderOH(can_to_q)
val to_q = OHToUInt(to_q_oh)
when (can_to_q.orR) {
val head = Mux1H(to_q_oh, heads)
heads(to_q) := Mux(
head === Mux1H(to_q_oh, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(to_q_oh, starts.map(_.U)),
head + 1.U)
for (i <- 0 until nVirtualChannels) {
when (to_q_oh(i)) {
qs(i).io.enq.valid := true.B
qs(i).io.enq.bits := mem.read(head)
}
}
}
for (i <- 0 until nVirtualChannels) {
io.deq(i) <> qs(i).io.deq
}
} else {
qs.map(_.io.deq.ready := false.B)
val ready_sel = io.deq.map(_.ready)
val fire = io.deq.map(_.fire)
assert(PopCount(fire) <= 1.U)
val head = Mux1H(fire, heads)
when (fire.orR) {
val fire_idx = OHToUInt(fire)
heads(fire_idx) := Mux(
head === Mux1H(fire, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(fire, starts.map(_.U)),
head + 1.U)
}
val read_flit = mem.read(head)
for (i <- 0 until nVirtualChannels) {
io.deq(i).valid := !empty(i)
io.deq(i).bits := read_flit
}
}
}
}
class InputUnit(cParam: ChannelParams, outParams: Seq[ChannelParams],
egressParams: Seq[EgressChannelParams],
combineRCVA: Boolean, combineSAST: Boolean
)
(implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) {
val nVirtualChannels = cParam.nVirtualChannels
val virtualChannelParams = cParam.virtualChannelParams
class InputUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) {
val in = Flipped(new Channel(cParam.asInstanceOf[ChannelParams]))
}
val io = IO(new InputUnitIO)
val g_i :: g_r :: g_v :: g_a :: g_c :: Nil = Enum(5)
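  // Per-VC packet lifecycle: g_i = idle, g_r = awaiting route computation, g_v =
  // awaiting VC allocation, g_a = active (arbitrating for the switch). A VC returns to
  // g_i once its tail flit wins switch allocation.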
class InputState extends Bundle {
val g = UInt(3.W)
val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })
val flow = new FlowRoutingBundle
val fifo_deps = UInt(nVirtualChannels.W)
}
val input_buffer = Module(new InputBuffer(cParam))
for (i <- 0 until cParam.srcSpeedup) {
input_buffer.io.enq(i) := io.in.flit(i)
}
input_buffer.io.deq.foreach(_.ready := false.B)
val route_arbiter = Module(new Arbiter(
new RouteComputerReq, nVirtualChannels
))
io.router_req <> route_arbiter.io.out
val states = Reg(Vec(nVirtualChannels, new InputState))
val anyFifo = cParam.possibleFlows.map(_.fifo).reduce(_||_)
val allFifo = cParam.possibleFlows.map(_.fifo).reduce(_&&_)
if (anyFifo) {
val idle_mask = VecInit(states.map(_.g === g_i)).asUInt
for (s <- states)
for (i <- 0 until nVirtualChannels)
s.fifo_deps := s.fifo_deps & ~idle_mask
}
for (i <- 0 until cParam.srcSpeedup) {
when (io.in.flit(i).fire && io.in.flit(i).bits.head) {
val id = io.in.flit(i).bits.virt_channel_id
assert(id < nVirtualChannels.U)
assert(states(id).g === g_i)
val at_dest = io.in.flit(i).bits.flow.egress_node === nodeId.U
states(id).g := Mux(at_dest, g_v, g_r)
states(id).vc_sel.foreach(_.foreach(_ := false.B))
for (o <- 0 until nEgress) {
when (o.U === io.in.flit(i).bits.flow.egress_node_id) {
states(id).vc_sel(o+nOutputs)(0) := true.B
}
}
states(id).flow := io.in.flit(i).bits.flow
if (anyFifo) {
val fifo = cParam.possibleFlows.filter(_.fifo).map(_.isFlow(io.in.flit(i).bits.flow)).toSeq.orR
states(id).fifo_deps := VecInit(states.zipWithIndex.map { case (s, j) =>
s.g =/= g_i && s.flow.asUInt === io.in.flit(i).bits.flow.asUInt && j.U =/= id
}).asUInt
}
}
}
(route_arbiter.io.in zip states).zipWithIndex.map { case ((i,s),idx) =>
if (virtualChannelParams(idx).traversable) {
i.valid := s.g === g_r
i.bits.flow := s.flow
i.bits.src_virt_id := idx.U
when (i.fire) { s.g := g_v }
} else {
i.valid := false.B
i.bits := DontCare
}
}
when (io.router_req.fire) {
val id = io.router_req.bits.src_virt_id
assert(states(id).g === g_r)
states(id).g := g_v
for (i <- 0 until nVirtualChannels) {
when (i.U === id) {
states(i).vc_sel := io.router_resp.vc_sel
}
}
}
val mask = RegInit(0.U(nVirtualChannels.W))
val vcalloc_reqs = Wire(Vec(nVirtualChannels, new VCAllocReq(cParam, outParams, egressParams)))
val vcalloc_vals = Wire(Vec(nVirtualChannels, Bool()))
val vcalloc_filter = PriorityEncoderOH(Cat(vcalloc_vals.asUInt, vcalloc_vals.asUInt & ~mask))
val vcalloc_sel = vcalloc_filter(nVirtualChannels-1,0) | (vcalloc_filter >> nVirtualChannels)
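  // Round-robin selection: the low half of vcalloc_filter holds only requests strictly
  // above the last grant (vcalloc_vals & ~mask), so PriorityEncoderOH serves those
  // first and falls back to the full request vector otherwise.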
  // Prioritize incoming packets
when (io.router_req.fire) {
mask := (1.U << io.router_req.bits.src_virt_id) - 1.U
} .elsewhen (vcalloc_vals.orR) {
mask := Mux1H(vcalloc_sel, (0 until nVirtualChannels).map { w => ~(0.U((w+1).W)) })
}
io.vcalloc_req.valid := vcalloc_vals.orR
io.vcalloc_req.bits := Mux1H(vcalloc_sel, vcalloc_reqs)
states.zipWithIndex.map { case (s,idx) =>
if (virtualChannelParams(idx).traversable) {
vcalloc_vals(idx) := s.g === g_v && s.fifo_deps === 0.U
vcalloc_reqs(idx).in_vc := idx.U
vcalloc_reqs(idx).vc_sel := s.vc_sel
vcalloc_reqs(idx).flow := s.flow
when (vcalloc_vals(idx) && vcalloc_sel(idx) && io.vcalloc_req.ready) { s.g := g_a }
if (combineRCVA) {
when (route_arbiter.io.in(idx).fire) {
vcalloc_vals(idx) := true.B
vcalloc_reqs(idx).vc_sel := io.router_resp.vc_sel
}
}
} else {
vcalloc_vals(idx) := false.B
vcalloc_reqs(idx) := DontCare
}
}
io.debug.va_stall := PopCount(vcalloc_vals) - io.vcalloc_req.ready
when (io.vcalloc_req.fire) {
for (i <- 0 until nVirtualChannels) {
when (vcalloc_sel(i)) {
states(i).vc_sel := io.vcalloc_resp.vc_sel
states(i).g := g_a
if (!combineRCVA) {
assert(states(i).g === g_v)
}
}
}
}
val salloc_arb = Module(new SwitchArbiter(
nVirtualChannels,
cParam.destSpeedup,
outParams, egressParams
))
(states zip salloc_arb.io.in).zipWithIndex.map { case ((s,r),i) =>
if (virtualChannelParams(i).traversable) {
val credit_available = (s.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U
r.valid := s.g === g_a && credit_available && input_buffer.io.deq(i).valid
r.bits.vc_sel := s.vc_sel
val deq_tail = input_buffer.io.deq(i).bits.tail
r.bits.tail := deq_tail
when (r.fire && deq_tail) {
s.g := g_i
}
input_buffer.io.deq(i).ready := r.ready
} else {
r.valid := false.B
r.bits := DontCare
}
}
io.debug.sa_stall := PopCount(salloc_arb.io.in.map(r => r.valid && !r.ready))
io.salloc_req <> salloc_arb.io.out
when (io.block) {
salloc_arb.io.out.foreach(_.ready := false.B)
io.salloc_req.foreach(_.valid := false.B)
}
class OutBundle extends Bundle {
val valid = Bool()
val vid = UInt(virtualChannelBits.W)
val out_vid = UInt(log2Up(allOutParams.map(_.nVirtualChannels).max).W)
val flit = new Flit(cParam.payloadBits)
}
val salloc_outs = if (combineSAST) {
Wire(Vec(cParam.destSpeedup, new OutBundle))
} else {
Reg(Vec(cParam.destSpeedup, new OutBundle))
}
io.in.credit_return := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
Mux(o.fire, salloc_arb.io.chosen_oh(i), 0.U)
}.reduce(_|_)
io.in.vc_free := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
Mux(o.fire && Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail)),
salloc_arb.io.chosen_oh(i), 0.U)
}.reduce(_|_)
for (i <- 0 until cParam.destSpeedup) {
val salloc_out = salloc_outs(i)
salloc_out.valid := salloc_arb.io.out(i).fire
salloc_out.vid := OHToUInt(salloc_arb.io.chosen_oh(i))
val vc_sel = Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.vc_sel))
val channel_oh = vc_sel.map(_.reduce(_||_)).toSeq
val virt_channel = Mux1H(channel_oh, vc_sel.map(v => OHToUInt(v)).toSeq)
when (salloc_arb.io.out(i).fire) {
salloc_out.out_vid := virt_channel
salloc_out.flit.payload := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.payload))
salloc_out.flit.head := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.head))
salloc_out.flit.tail := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail))
salloc_out.flit.flow := Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.flow))
} .otherwise {
salloc_out.out_vid := DontCare
salloc_out.flit := DontCare
}
salloc_out.flit.virt_channel_id := DontCare // this gets set in the switch
io.out(i).valid := salloc_out.valid
io.out(i).bits.flit := salloc_out.flit
io.out(i).bits.out_virt_channel := salloc_out.out_vid
}
def filterVCSel(sel: MixedVec[Vec[Bool]], srcV: Int) = {
if (virtualChannelParams(srcV).traversable) {
outParams.zipWithIndex.map { case (oP, oI) =>
(0 until oP.nVirtualChannels).map { oV =>
var allow = false
virtualChannelParams(srcV).possibleFlows.foreach { pI =>
allow = allow || routingRelation(
cParam.channelRoutingInfos(srcV),
oP.channelRoutingInfos(oV),
pI
)
}
if (!allow)
sel(oI)(oV) := false.B
}
}
}
}
(0 until nVirtualChannels).map { i =>
if (!virtualChannelParams(i).traversable) states(i) := DontCare
filterVCSel(states(i).vc_sel, i)
}
when (reset.asBool) {
states.foreach(_.g := g_i)
}
}
| module InputUnit_43( // @[InputUnit.scala:158:7]
input clock, // @[InputUnit.scala:158:7]
input reset, // @[InputUnit.scala:158:7]
output [1:0] io_router_req_bits_src_virt_id, // @[InputUnit.scala:170:14]
output [1:0] io_router_req_bits_flow_vnet_id, // @[InputUnit.scala:170:14]
output [3:0] io_router_req_bits_flow_ingress_node, // @[InputUnit.scala:170:14]
output [2:0] io_router_req_bits_flow_ingress_node_id, // @[InputUnit.scala:170:14]
output [3:0] io_router_req_bits_flow_egress_node, // @[InputUnit.scala:170:14]
output [1:0] io_router_req_bits_flow_egress_node_id, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_1_0, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_1_1, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_1_2, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_0_0, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_0_1, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_0_2, // @[InputUnit.scala:170:14]
input io_vcalloc_req_ready, // @[InputUnit.scala:170:14]
output io_vcalloc_req_valid, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_2_0, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_1_0, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_1_1, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_1_2, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_0_0, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_0_1, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_0_2, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_2_0, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_0_1, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_0_2, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_0, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_1, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_2, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_1, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_2, // @[InputUnit.scala:170:14]
input io_salloc_req_0_ready, // @[InputUnit.scala:170:14]
output io_salloc_req_0_valid, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_0, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_0, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_1, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_2, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_1, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_2, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_tail, // @[InputUnit.scala:170:14]
output io_out_0_valid, // @[InputUnit.scala:170:14]
output io_out_0_bits_flit_head, // @[InputUnit.scala:170:14]
output io_out_0_bits_flit_tail, // @[InputUnit.scala:170:14]
output [144:0] io_out_0_bits_flit_payload, // @[InputUnit.scala:170:14]
output [1:0] io_out_0_bits_flit_flow_vnet_id, // @[InputUnit.scala:170:14]
output [3:0] io_out_0_bits_flit_flow_ingress_node, // @[InputUnit.scala:170:14]
output [2:0] io_out_0_bits_flit_flow_ingress_node_id, // @[InputUnit.scala:170:14]
output [3:0] io_out_0_bits_flit_flow_egress_node, // @[InputUnit.scala:170:14]
output [1:0] io_out_0_bits_flit_flow_egress_node_id, // @[InputUnit.scala:170:14]
output [1:0] io_out_0_bits_out_virt_channel, // @[InputUnit.scala:170:14]
output [1:0] io_debug_va_stall, // @[InputUnit.scala:170:14]
output [1:0] io_debug_sa_stall, // @[InputUnit.scala:170:14]
input io_in_flit_0_valid, // @[InputUnit.scala:170:14]
input io_in_flit_0_bits_head, // @[InputUnit.scala:170:14]
input io_in_flit_0_bits_tail, // @[InputUnit.scala:170:14]
input [144:0] io_in_flit_0_bits_payload, // @[InputUnit.scala:170:14]
input [1:0] io_in_flit_0_bits_flow_vnet_id, // @[InputUnit.scala:170:14]
input [3:0] io_in_flit_0_bits_flow_ingress_node, // @[InputUnit.scala:170:14]
input [2:0] io_in_flit_0_bits_flow_ingress_node_id, // @[InputUnit.scala:170:14]
input [3:0] io_in_flit_0_bits_flow_egress_node, // @[InputUnit.scala:170:14]
input [1:0] io_in_flit_0_bits_flow_egress_node_id, // @[InputUnit.scala:170:14]
input [1:0] io_in_flit_0_bits_virt_channel_id, // @[InputUnit.scala:170:14]
output [2:0] io_in_credit_return, // @[InputUnit.scala:170:14]
output [2:0] io_in_vc_free // @[InputUnit.scala:170:14]
);
wire _GEN; // @[MixedVec.scala:116:9]
wire vcalloc_reqs_2_vc_sel_0_2; // @[MixedVec.scala:116:9]
wire vcalloc_vals_2; // @[InputUnit.scala:266:25, :272:46, :273:29]
wire _GEN_0; // @[MixedVec.scala:116:9]
wire vcalloc_reqs_1_vc_sel_0_1; // @[MixedVec.scala:116:9]
wire vcalloc_vals_1; // @[InputUnit.scala:266:25, :272:46, :273:29]
wire _salloc_arb_io_in_1_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_2_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_out_0_valid; // @[InputUnit.scala:296:26]
wire [2:0] _salloc_arb_io_chosen_oh_0; // @[InputUnit.scala:296:26]
wire _route_arbiter_io_in_1_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_in_2_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_out_valid; // @[InputUnit.scala:187:29]
wire [1:0] _route_arbiter_io_out_bits_src_virt_id; // @[InputUnit.scala:187:29]
wire _input_buffer_io_deq_0_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_0_bits_tail; // @[InputUnit.scala:181:28]
wire [144:0] _input_buffer_io_deq_0_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_1_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_1_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_1_bits_tail; // @[InputUnit.scala:181:28]
wire [144:0] _input_buffer_io_deq_1_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_2_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_2_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_2_bits_tail; // @[InputUnit.scala:181:28]
wire [144:0] _input_buffer_io_deq_2_bits_payload; // @[InputUnit.scala:181:28]
reg [2:0] states_1_g; // @[InputUnit.scala:192:19]
reg states_1_vc_sel_2_0; // @[InputUnit.scala:192:19]
reg states_1_vc_sel_0_1; // @[InputUnit.scala:192:19]
reg [1:0] states_1_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [3:0] states_1_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [2:0] states_1_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [3:0] states_1_flow_egress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_1_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_2_g; // @[InputUnit.scala:192:19]
reg states_2_vc_sel_2_0; // @[InputUnit.scala:192:19]
reg states_2_vc_sel_0_2; // @[InputUnit.scala:192:19]
reg [1:0] states_2_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [3:0] states_2_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [2:0] states_2_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [3:0] states_2_flow_egress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_2_flow_egress_node_id; // @[InputUnit.scala:192:19]
wire _GEN_1 = io_in_flit_0_valid & io_in_flit_0_bits_head; // @[InputUnit.scala:205:30]
wire route_arbiter_io_in_1_valid = states_1_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
wire _GEN_2 = _route_arbiter_io_in_1_ready & route_arbiter_io_in_1_valid; // @[Decoupled.scala:51:35]
wire route_arbiter_io_in_2_valid = states_2_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
wire _GEN_3 = _route_arbiter_io_in_2_ready & route_arbiter_io_in_2_valid; // @[Decoupled.scala:51:35] |
Generate the Verilog code corresponding to the following Chisel files.
File InputUnit.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{FlowRoutingBundle}
import constellation.noc.{HasNoCParams}
class AbstractInputUnitIO(
val cParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams],
)(implicit val p: Parameters) extends Bundle with HasRouterOutputParams {
val nodeId = cParam.destId
val router_req = Decoupled(new RouteComputerReq)
val router_resp = Input(new RouteComputerResp(outParams, egressParams))
val vcalloc_req = Decoupled(new VCAllocReq(cParam, outParams, egressParams))
val vcalloc_resp = Input(new VCAllocResp(outParams, egressParams))
val out_credit_available = Input(MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) }))
val salloc_req = Vec(cParam.destSpeedup, Decoupled(new SwitchAllocReq(outParams, egressParams)))
val out = Vec(cParam.destSpeedup, Valid(new SwitchBundle(outParams, egressParams)))
val debug = Output(new Bundle {
val va_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
val sa_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
})
val block = Input(Bool())
}
abstract class AbstractInputUnit(
val cParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams]
)(implicit val p: Parameters) extends Module with HasRouterOutputParams with HasNoCParams {
val nodeId = cParam.destId
def io: AbstractInputUnitIO
}
class InputBuffer(cParam: ChannelParams)(implicit p: Parameters) extends Module {
val nVirtualChannels = cParam.nVirtualChannels
val io = IO(new Bundle {
val enq = Flipped(Vec(cParam.srcSpeedup, Valid(new Flit(cParam.payloadBits))))
val deq = Vec(cParam.nVirtualChannels, Decoupled(new BaseFlit(cParam.payloadBits)))
})
val useOutputQueues = cParam.useOutputQueues
val delims = if (useOutputQueues) {
cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize else 0).scanLeft(0)(_+_)
} else {
    // If there is no output queuing, add one extra slot per VC, since head == tail must mean empty
// TODO this should be fixed, should use all slots available
cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize + 1 else 0).scanLeft(0)(_+_)
}
val starts = delims.dropRight(1).zipWithIndex.map { case (s,i) =>
if (cParam.virtualChannelParams(i).traversable) s else 0
}
val ends = delims.tail.zipWithIndex.map { case (s,i) =>
if (cParam.virtualChannelParams(i).traversable) s else 0
}
val fullSize = delims.last
// Ugly case. Use multiple queues
if ((cParam.srcSpeedup > 1 || cParam.destSpeedup > 1 || fullSize <= 1) || !cParam.unifiedBuffer) {
require(useOutputQueues)
val qs = cParam.virtualChannelParams.map(v => Module(new Queue(new BaseFlit(cParam.payloadBits), v.bufferSize)))
qs.zipWithIndex.foreach { case (q,i) =>
val sel = io.enq.map(f => f.valid && f.bits.virt_channel_id === i.U)
q.io.enq.valid := sel.orR
q.io.enq.bits.head := Mux1H(sel, io.enq.map(_.bits.head))
q.io.enq.bits.tail := Mux1H(sel, io.enq.map(_.bits.tail))
q.io.enq.bits.payload := Mux1H(sel, io.enq.map(_.bits.payload))
io.deq(i) <> q.io.deq
}
} else {
val mem = Mem(fullSize, new BaseFlit(cParam.payloadBits))
val heads = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
val tails = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
val empty = (heads zip tails).map(t => t._1 === t._2)
val qs = Seq.fill(nVirtualChannels) { Module(new Queue(new BaseFlit(cParam.payloadBits), 1, pipe=true)) }
qs.foreach(_.io.enq.valid := false.B)
qs.foreach(_.io.enq.bits := DontCare)
val vc_sel = UIntToOH(io.enq(0).bits.virt_channel_id)
val flit = Wire(new BaseFlit(cParam.payloadBits))
val direct_to_q = (Mux1H(vc_sel, qs.map(_.io.enq.ready)) && Mux1H(vc_sel, empty)) && useOutputQueues.B
flit.head := io.enq(0).bits.head
flit.tail := io.enq(0).bits.tail
flit.payload := io.enq(0).bits.payload
when (io.enq(0).valid && !direct_to_q) {
val tail = tails(io.enq(0).bits.virt_channel_id)
mem.write(tail, flit)
tails(io.enq(0).bits.virt_channel_id) := Mux(
tail === Mux1H(vc_sel, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(vc_sel, starts.map(_.U)),
tail + 1.U)
} .elsewhen (io.enq(0).valid && direct_to_q) {
for (i <- 0 until nVirtualChannels) {
when (io.enq(0).bits.virt_channel_id === i.U) {
qs(i).io.enq.valid := true.B
qs(i).io.enq.bits := flit
}
}
}
if (useOutputQueues) {
val can_to_q = (0 until nVirtualChannels).map { i => !empty(i) && qs(i).io.enq.ready }
val to_q_oh = PriorityEncoderOH(can_to_q)
val to_q = OHToUInt(to_q_oh)
when (can_to_q.orR) {
val head = Mux1H(to_q_oh, heads)
heads(to_q) := Mux(
head === Mux1H(to_q_oh, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(to_q_oh, starts.map(_.U)),
head + 1.U)
for (i <- 0 until nVirtualChannels) {
when (to_q_oh(i)) {
qs(i).io.enq.valid := true.B
qs(i).io.enq.bits := mem.read(head)
}
}
}
for (i <- 0 until nVirtualChannels) {
io.deq(i) <> qs(i).io.deq
}
} else {
qs.map(_.io.deq.ready := false.B)
val ready_sel = io.deq.map(_.ready)
val fire = io.deq.map(_.fire)
assert(PopCount(fire) <= 1.U)
val head = Mux1H(fire, heads)
when (fire.orR) {
val fire_idx = OHToUInt(fire)
heads(fire_idx) := Mux(
head === Mux1H(fire, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(fire, starts.map(_.U)),
head + 1.U)
}
val read_flit = mem.read(head)
for (i <- 0 until nVirtualChannels) {
io.deq(i).valid := !empty(i)
io.deq(i).bits := read_flit
}
}
}
}
class InputUnit(cParam: ChannelParams, outParams: Seq[ChannelParams],
egressParams: Seq[EgressChannelParams],
combineRCVA: Boolean, combineSAST: Boolean
)
(implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) {
val nVirtualChannels = cParam.nVirtualChannels
val virtualChannelParams = cParam.virtualChannelParams
class InputUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) {
val in = Flipped(new Channel(cParam.asInstanceOf[ChannelParams]))
}
val io = IO(new InputUnitIO)
val g_i :: g_r :: g_v :: g_a :: g_c :: Nil = Enum(5)
class InputState extends Bundle {
val g = UInt(3.W)
val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })
val flow = new FlowRoutingBundle
val fifo_deps = UInt(nVirtualChannels.W)
}
val input_buffer = Module(new InputBuffer(cParam))
for (i <- 0 until cParam.srcSpeedup) {
input_buffer.io.enq(i) := io.in.flit(i)
}
input_buffer.io.deq.foreach(_.ready := false.B)
val route_arbiter = Module(new Arbiter(
new RouteComputerReq, nVirtualChannels
))
io.router_req <> route_arbiter.io.out
val states = Reg(Vec(nVirtualChannels, new InputState))
val anyFifo = cParam.possibleFlows.map(_.fifo).reduce(_||_)
val allFifo = cParam.possibleFlows.map(_.fifo).reduce(_&&_)
if (anyFifo) {
val idle_mask = VecInit(states.map(_.g === g_i)).asUInt
for (s <- states)
for (i <- 0 until nVirtualChannels)
s.fifo_deps := s.fifo_deps & ~idle_mask
}
for (i <- 0 until cParam.srcSpeedup) {
when (io.in.flit(i).fire && io.in.flit(i).bits.head) {
val id = io.in.flit(i).bits.virt_channel_id
assert(id < nVirtualChannels.U)
assert(states(id).g === g_i)
val at_dest = io.in.flit(i).bits.flow.egress_node === nodeId.U
states(id).g := Mux(at_dest, g_v, g_r)
states(id).vc_sel.foreach(_.foreach(_ := false.B))
for (o <- 0 until nEgress) {
when (o.U === io.in.flit(i).bits.flow.egress_node_id) {
states(id).vc_sel(o+nOutputs)(0) := true.B
}
}
states(id).flow := io.in.flit(i).bits.flow
if (anyFifo) {
val fifo = cParam.possibleFlows.filter(_.fifo).map(_.isFlow(io.in.flit(i).bits.flow)).toSeq.orR
states(id).fifo_deps := VecInit(states.zipWithIndex.map { case (s, j) =>
s.g =/= g_i && s.flow.asUInt === io.in.flit(i).bits.flow.asUInt && j.U =/= id
}).asUInt
}
}
}
(route_arbiter.io.in zip states).zipWithIndex.map { case ((i,s),idx) =>
if (virtualChannelParams(idx).traversable) {
i.valid := s.g === g_r
i.bits.flow := s.flow
i.bits.src_virt_id := idx.U
when (i.fire) { s.g := g_v }
} else {
i.valid := false.B
i.bits := DontCare
}
}
when (io.router_req.fire) {
val id = io.router_req.bits.src_virt_id
assert(states(id).g === g_r)
states(id).g := g_v
for (i <- 0 until nVirtualChannels) {
when (i.U === id) {
states(i).vc_sel := io.router_resp.vc_sel
}
}
}
val mask = RegInit(0.U(nVirtualChannels.W))
val vcalloc_reqs = Wire(Vec(nVirtualChannels, new VCAllocReq(cParam, outParams, egressParams)))
val vcalloc_vals = Wire(Vec(nVirtualChannels, Bool()))
val vcalloc_filter = PriorityEncoderOH(Cat(vcalloc_vals.asUInt, vcalloc_vals.asUInt & ~mask))
val vcalloc_sel = vcalloc_filter(nVirtualChannels-1,0) | (vcalloc_filter >> nVirtualChannels)
  // Prioritize incoming packets
when (io.router_req.fire) {
mask := (1.U << io.router_req.bits.src_virt_id) - 1.U
} .elsewhen (vcalloc_vals.orR) {
mask := Mux1H(vcalloc_sel, (0 until nVirtualChannels).map { w => ~(0.U((w+1).W)) })
}
io.vcalloc_req.valid := vcalloc_vals.orR
io.vcalloc_req.bits := Mux1H(vcalloc_sel, vcalloc_reqs)
states.zipWithIndex.map { case (s,idx) =>
if (virtualChannelParams(idx).traversable) {
vcalloc_vals(idx) := s.g === g_v && s.fifo_deps === 0.U
vcalloc_reqs(idx).in_vc := idx.U
vcalloc_reqs(idx).vc_sel := s.vc_sel
vcalloc_reqs(idx).flow := s.flow
when (vcalloc_vals(idx) && vcalloc_sel(idx) && io.vcalloc_req.ready) { s.g := g_a }
if (combineRCVA) {
when (route_arbiter.io.in(idx).fire) {
vcalloc_vals(idx) := true.B
vcalloc_reqs(idx).vc_sel := io.router_resp.vc_sel
}
}
} else {
vcalloc_vals(idx) := false.B
vcalloc_reqs(idx) := DontCare
}
}
io.debug.va_stall := PopCount(vcalloc_vals) - io.vcalloc_req.ready
when (io.vcalloc_req.fire) {
for (i <- 0 until nVirtualChannels) {
when (vcalloc_sel(i)) {
states(i).vc_sel := io.vcalloc_resp.vc_sel
states(i).g := g_a
if (!combineRCVA) {
assert(states(i).g === g_v)
}
}
}
}
val salloc_arb = Module(new SwitchArbiter(
nVirtualChannels,
cParam.destSpeedup,
outParams, egressParams
))
(states zip salloc_arb.io.in).zipWithIndex.map { case ((s,r),i) =>
if (virtualChannelParams(i).traversable) {
val credit_available = (s.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U
r.valid := s.g === g_a && credit_available && input_buffer.io.deq(i).valid
r.bits.vc_sel := s.vc_sel
val deq_tail = input_buffer.io.deq(i).bits.tail
r.bits.tail := deq_tail
when (r.fire && deq_tail) {
s.g := g_i
}
input_buffer.io.deq(i).ready := r.ready
} else {
r.valid := false.B
r.bits := DontCare
}
}
io.debug.sa_stall := PopCount(salloc_arb.io.in.map(r => r.valid && !r.ready))
io.salloc_req <> salloc_arb.io.out
when (io.block) {
salloc_arb.io.out.foreach(_.ready := false.B)
io.salloc_req.foreach(_.valid := false.B)
}
class OutBundle extends Bundle {
val valid = Bool()
val vid = UInt(virtualChannelBits.W)
val out_vid = UInt(log2Up(allOutParams.map(_.nVirtualChannels).max).W)
val flit = new Flit(cParam.payloadBits)
}
val salloc_outs = if (combineSAST) {
Wire(Vec(cParam.destSpeedup, new OutBundle))
} else {
Reg(Vec(cParam.destSpeedup, new OutBundle))
}
io.in.credit_return := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
Mux(o.fire, salloc_arb.io.chosen_oh(i), 0.U)
}.reduce(_|_)
io.in.vc_free := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
Mux(o.fire && Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail)),
salloc_arb.io.chosen_oh(i), 0.U)
}.reduce(_|_)
for (i <- 0 until cParam.destSpeedup) {
val salloc_out = salloc_outs(i)
salloc_out.valid := salloc_arb.io.out(i).fire
salloc_out.vid := OHToUInt(salloc_arb.io.chosen_oh(i))
val vc_sel = Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.vc_sel))
val channel_oh = vc_sel.map(_.reduce(_||_)).toSeq
val virt_channel = Mux1H(channel_oh, vc_sel.map(v => OHToUInt(v)).toSeq)
when (salloc_arb.io.out(i).fire) {
salloc_out.out_vid := virt_channel
salloc_out.flit.payload := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.payload))
salloc_out.flit.head := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.head))
salloc_out.flit.tail := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail))
salloc_out.flit.flow := Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.flow))
} .otherwise {
salloc_out.out_vid := DontCare
salloc_out.flit := DontCare
}
salloc_out.flit.virt_channel_id := DontCare // this gets set in the switch
io.out(i).valid := salloc_out.valid
io.out(i).bits.flit := salloc_out.flit
io.out(i).bits.out_virt_channel := salloc_out.out_vid
}
def filterVCSel(sel: MixedVec[Vec[Bool]], srcV: Int) = {
if (virtualChannelParams(srcV).traversable) {
outParams.zipWithIndex.map { case (oP, oI) =>
(0 until oP.nVirtualChannels).map { oV =>
var allow = false
virtualChannelParams(srcV).possibleFlows.foreach { pI =>
allow = allow || routingRelation(
cParam.channelRoutingInfos(srcV),
oP.channelRoutingInfos(oV),
pI
)
}
if (!allow)
sel(oI)(oV) := false.B
}
}
}
}
(0 until nVirtualChannels).map { i =>
if (!virtualChannelParams(i).traversable) states(i) := DontCare
filterVCSel(states(i).vc_sel, i)
}
when (reset.asBool) {
states.foreach(_.g := g_i)
}
}
| module InputUnit_18( // @[InputUnit.scala:158:7]
input clock, // @[InputUnit.scala:158:7]
input reset, // @[InputUnit.scala:158:7]
output [2:0] io_router_req_bits_src_virt_id, // @[InputUnit.scala:170:14]
output [2:0] io_router_req_bits_flow_vnet_id, // @[InputUnit.scala:170:14]
output [4:0] io_router_req_bits_flow_ingress_node, // @[InputUnit.scala:170:14]
output [1:0] io_router_req_bits_flow_ingress_node_id, // @[InputUnit.scala:170:14]
output [4:0] io_router_req_bits_flow_egress_node, // @[InputUnit.scala:170:14]
output [1:0] io_router_req_bits_flow_egress_node_id, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_3_4, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_3_5, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_3_6, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_3_7, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_2_4, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_2_5, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_2_6, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_2_7, // @[InputUnit.scala:170:14]
input io_vcalloc_req_ready, // @[InputUnit.scala:170:14]
output io_vcalloc_req_valid, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_3_4, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_3_5, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_3_6, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_3_7, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_2_4, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_2_5, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_2_6, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_2_7, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_3_4, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_3_5, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_3_6, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_3_7, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_2_4, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_2_5, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_2_6, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_2_7, // @[InputUnit.scala:170:14]
input io_out_credit_available_3_0, // @[InputUnit.scala:170:14]
input io_out_credit_available_3_1, // @[InputUnit.scala:170:14]
input io_out_credit_available_3_2, // @[InputUnit.scala:170:14]
input io_out_credit_available_3_3, // @[InputUnit.scala:170:14]
input io_out_credit_available_3_4, // @[InputUnit.scala:170:14]
input io_out_credit_available_3_5, // @[InputUnit.scala:170:14]
input io_out_credit_available_3_6, // @[InputUnit.scala:170:14]
input io_out_credit_available_3_7, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_0, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_1, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_2, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_3, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_4, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_5, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_6, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_7, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_1, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_2, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_3, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_4, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_5, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_6, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_7, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_1, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_2, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_3, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_4, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_5, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_6, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_7, // @[InputUnit.scala:170:14]
input io_salloc_req_0_ready, // @[InputUnit.scala:170:14]
output io_salloc_req_0_valid, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_3_0, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_3_1, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_3_2, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_3_3, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_3_4, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_3_5, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_3_6, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_3_7, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_0, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_1, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_2, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_3, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_4, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_5, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_6, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_7, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_0, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_1, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_2, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_3, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_4, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_5, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_6, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_7, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_1, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_2, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_3, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_4, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_5, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_6, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_7, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_tail, // @[InputUnit.scala:170:14]
output io_out_0_valid, // @[InputUnit.scala:170:14]
output io_out_0_bits_flit_head, // @[InputUnit.scala:170:14]
output io_out_0_bits_flit_tail, // @[InputUnit.scala:170:14]
output [72:0] io_out_0_bits_flit_payload, // @[InputUnit.scala:170:14]
output [2:0] io_out_0_bits_flit_flow_vnet_id, // @[InputUnit.scala:170:14]
output [4:0] io_out_0_bits_flit_flow_ingress_node, // @[InputUnit.scala:170:14]
output [1:0] io_out_0_bits_flit_flow_ingress_node_id, // @[InputUnit.scala:170:14]
output [4:0] io_out_0_bits_flit_flow_egress_node, // @[InputUnit.scala:170:14]
output [1:0] io_out_0_bits_flit_flow_egress_node_id, // @[InputUnit.scala:170:14]
output [2:0] io_out_0_bits_out_virt_channel, // @[InputUnit.scala:170:14]
output [2:0] io_debug_va_stall, // @[InputUnit.scala:170:14]
output [2:0] io_debug_sa_stall, // @[InputUnit.scala:170:14]
input io_in_flit_0_valid, // @[InputUnit.scala:170:14]
input io_in_flit_0_bits_head, // @[InputUnit.scala:170:14]
input io_in_flit_0_bits_tail, // @[InputUnit.scala:170:14]
input [72:0] io_in_flit_0_bits_payload, // @[InputUnit.scala:170:14]
input [2:0] io_in_flit_0_bits_flow_vnet_id, // @[InputUnit.scala:170:14]
input [4:0] io_in_flit_0_bits_flow_ingress_node, // @[InputUnit.scala:170:14]
input [1:0] io_in_flit_0_bits_flow_ingress_node_id, // @[InputUnit.scala:170:14]
input [4:0] io_in_flit_0_bits_flow_egress_node, // @[InputUnit.scala:170:14]
input [1:0] io_in_flit_0_bits_flow_egress_node_id, // @[InputUnit.scala:170:14]
input [2:0] io_in_flit_0_bits_virt_channel_id, // @[InputUnit.scala:170:14]
output [7:0] io_in_credit_return, // @[InputUnit.scala:170:14]
output [7:0] io_in_vc_free // @[InputUnit.scala:170:14]
);
wire vcalloc_vals_7; // @[InputUnit.scala:266:32]
wire vcalloc_vals_6; // @[InputUnit.scala:266:32]
wire vcalloc_vals_5; // @[InputUnit.scala:266:32]
wire vcalloc_vals_4; // @[InputUnit.scala:266:32]
wire _salloc_arb_io_in_4_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_5_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_6_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_7_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_out_0_valid; // @[InputUnit.scala:296:26]
wire [7:0] _salloc_arb_io_chosen_oh_0; // @[InputUnit.scala:296:26]
wire _route_arbiter_io_in_4_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_in_5_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_in_6_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_in_7_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_out_valid; // @[InputUnit.scala:187:29]
wire [2:0] _route_arbiter_io_out_bits_src_virt_id; // @[InputUnit.scala:187:29]
wire _input_buffer_io_deq_0_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_0_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_0_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_1_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_1_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_1_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_2_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_2_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_2_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_3_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_3_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_3_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_4_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_4_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_4_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_4_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_5_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_5_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_5_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_5_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_6_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_6_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_6_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_6_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_7_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_7_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_7_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_7_bits_payload; // @[InputUnit.scala:181:28]
reg [2:0] states_4_g; // @[InputUnit.scala:192:19]
reg states_4_vc_sel_3_4; // @[InputUnit.scala:192:19]
reg states_4_vc_sel_2_4; // @[InputUnit.scala:192:19]
reg [2:0] states_4_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [4:0] states_4_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_4_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [4:0] states_4_flow_egress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_4_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_5_g; // @[InputUnit.scala:192:19]
reg states_5_vc_sel_3_4; // @[InputUnit.scala:192:19]
reg states_5_vc_sel_3_5; // @[InputUnit.scala:192:19]
reg states_5_vc_sel_3_6; // @[InputUnit.scala:192:19]
reg states_5_vc_sel_3_7; // @[InputUnit.scala:192:19]
reg states_5_vc_sel_2_4; // @[InputUnit.scala:192:19]
reg states_5_vc_sel_2_5; // @[InputUnit.scala:192:19]
reg states_5_vc_sel_2_6; // @[InputUnit.scala:192:19]
reg states_5_vc_sel_2_7; // @[InputUnit.scala:192:19]
reg [2:0] states_5_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [4:0] states_5_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_5_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [4:0] states_5_flow_egress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_5_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_6_g; // @[InputUnit.scala:192:19]
reg states_6_vc_sel_3_4; // @[InputUnit.scala:192:19]
reg states_6_vc_sel_3_5; // @[InputUnit.scala:192:19]
reg states_6_vc_sel_3_6; // @[InputUnit.scala:192:19]
reg states_6_vc_sel_3_7; // @[InputUnit.scala:192:19]
reg states_6_vc_sel_2_4; // @[InputUnit.scala:192:19]
reg states_6_vc_sel_2_5; // @[InputUnit.scala:192:19]
reg states_6_vc_sel_2_6; // @[InputUnit.scala:192:19]
reg states_6_vc_sel_2_7; // @[InputUnit.scala:192:19]
reg [2:0] states_6_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [4:0] states_6_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_6_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [4:0] states_6_flow_egress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_6_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_7_g; // @[InputUnit.scala:192:19]
reg states_7_vc_sel_3_4; // @[InputUnit.scala:192:19]
reg states_7_vc_sel_3_5; // @[InputUnit.scala:192:19]
reg states_7_vc_sel_3_6; // @[InputUnit.scala:192:19]
reg states_7_vc_sel_3_7; // @[InputUnit.scala:192:19]
reg states_7_vc_sel_2_4; // @[InputUnit.scala:192:19]
reg states_7_vc_sel_2_5; // @[InputUnit.scala:192:19]
reg states_7_vc_sel_2_6; // @[InputUnit.scala:192:19]
reg states_7_vc_sel_2_7; // @[InputUnit.scala:192:19]
reg [2:0] states_7_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [4:0] states_7_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_7_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [4:0] states_7_flow_egress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_7_flow_egress_node_id; // @[InputUnit.scala:192:19]
wire _GEN = io_in_flit_0_valid & io_in_flit_0_bits_head; // @[InputUnit.scala:205:30]
wire route_arbiter_io_in_4_valid = states_4_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
wire route_arbiter_io_in_5_valid = states_5_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
wire route_arbiter_io_in_6_valid = states_6_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
wire route_arbiter_io_in_7_valid = states_7_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
reg [7:0] mask; // @[InputUnit.scala:250:21]
wire [7:0] _vcalloc_filter_T_3 = {vcalloc_vals_7, vcalloc_vals_6, vcalloc_vals_5, vcalloc_vals_4, 4'h0} & ~mask; // @[InputUnit.scala:250:21, :253:{80,87,89}, :266:32]
wire [15:0] vcalloc_filter = _vcalloc_filter_T_3[0] ? 16'h1 : _vcalloc_filter_T_3[1] ? 16'h2 : _vcalloc_filter_T_3[2] ? 16'h4 : _vcalloc_filter_T_3[3] ? 16'h8 : _vcalloc_filter_T_3[4] ? 16'h10 : _vcalloc_filter_T_3[5] ? 16'h20 : _vcalloc_filter_T_3[6] ? 16'h40 : _vcalloc_filter_T_3[7] ? 16'h80 : vcalloc_vals_4 ? 16'h1000 : vcalloc_vals_5 ? 16'h2000 : vcalloc_vals_6 ? 16'h4000 : {vcalloc_vals_7, 15'h0}; // @[OneHot.scala:85:71]
wire [7:0] vcalloc_sel = vcalloc_filter[7:0] | vcalloc_filter[15:8]; // @[Mux.scala:50:70]
wire io_vcalloc_req_valid_0 = vcalloc_vals_4 | vcalloc_vals_5 | vcalloc_vals_6 | vcalloc_vals_7; // @[package.scala:81:59]
assign vcalloc_vals_4 = states_4_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
assign vcalloc_vals_5 = states_5_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
assign vcalloc_vals_6 = states_6_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
assign vcalloc_vals_7 = states_7_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
wire _GEN_0 = io_vcalloc_req_ready & io_vcalloc_req_valid_0; // @[Decoupled.scala:51:35]
wire _GEN_1 = _GEN_0 & vcalloc_sel[4]; // @[Mux.scala:32:36]
wire _GEN_2 = _GEN_0 & vcalloc_sel[5]; // @[Mux.scala:32:36]
wire _GEN_3 = _GEN_0 & vcalloc_sel[6]; // @[Mux.scala:32:36]
wire _GEN_4 = _GEN_0 & vcalloc_sel[7]; // @[Mux.scala:32:36] |
Generate the Verilog code corresponding to the following Chisel files.
File Fragmenter.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressSet, BufferParams, IdRange, TransferSizes}
import freechips.rocketchip.util.{Repeater, OH1ToUInt, UIntToOH1}
import scala.math.min
import freechips.rocketchip.util.DataToAugmentedData
object EarlyAck {
sealed trait T
case object AllPuts extends T
case object PutFulls extends T
case object None extends T
}
// minSize: minimum size of transfers supported by all outward managers
// maxSize: maximum size of transfers supported after the Fragmenter is applied
// alwaysMin: fragment all requests down to minSize (else fragment to maximum supported by manager)
// earlyAck: should a multibeat Put be acknowledged on the first beat or on the last beat
// holdFirstDeny: allow the Fragmenter to unsafely combine multibeat Gets by taking the first denied for the whole burst
// nameSuffix: appends a suffix to the module name
// Fragmenter modifies: PutFull, PutPartial, LogicalData, Get, Hint
// Fragmenter passes: ArithmeticData (truncated to minSize if alwaysMin)
// Fragmenter cannot modify acquire (could livelock); thus it is unsafe to put caches on both sides
class TLFragmenter(val minSize: Int, val maxSize: Int, val alwaysMin: Boolean = false, val earlyAck: EarlyAck.T = EarlyAck.None, val holdFirstDeny: Boolean = false, val nameSuffix: Option[String] = None)(implicit p: Parameters) extends LazyModule
{
require(isPow2 (maxSize), s"TLFragmenter expects pow2(maxSize), but got $maxSize")
require(isPow2 (minSize), s"TLFragmenter expects pow2(minSize), but got $minSize")
require(minSize <= maxSize, s"TLFragmenter expects min <= max, but got $minSize > $maxSize")
val fragmentBits = log2Ceil(maxSize / minSize)
val fullBits = if (earlyAck == EarlyAck.PutFulls) 1 else 0
val toggleBits = 1
val addedBits = fragmentBits + toggleBits + fullBits
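// Worked example: with minSize = 8, maxSize = 64 and earlyAck = None, fragmentBits = log2Ceil(64/8) = 3
// and addedBits = 3 + 1 + 0 = 4, so a 7-bit inbound source id widens to an 11-bit outbound source id --
// consistent with the 7-bit/11-bit source ports of the generated TLFragmenter module further below.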
def expandTransfer(x: TransferSizes, op: String) = if (!x) x else {
// validate that we can apply the fragmenter correctly
require (x.max >= minSize, s"TLFragmenter (with parent $parent) max transfer size $op(${x.max}) must be >= min transfer size (${minSize})")
TransferSizes(x.min, maxSize)
}
private def noChangeRequired = minSize == maxSize
private def shrinkTransfer(x: TransferSizes) =
if (!alwaysMin) x
else if (x.min <= minSize) TransferSizes(x.min, min(minSize, x.max))
else TransferSizes.none
private def mapManager(m: TLSlaveParameters) = m.v1copy(
supportsArithmetic = shrinkTransfer(m.supportsArithmetic),
supportsLogical = shrinkTransfer(m.supportsLogical),
supportsGet = expandTransfer(m.supportsGet, "Get"),
supportsPutFull = expandTransfer(m.supportsPutFull, "PutFull"),
supportsPutPartial = expandTransfer(m.supportsPutPartial, "PutPartial"),
supportsHint = expandTransfer(m.supportsHint, "Hint"))
val node = new TLAdapterNode(
// We require that all the responses are mutually FIFO
// Thus we need to compact all of the masters into one big master
clientFn = { c => (if (noChangeRequired) c else c.v2copy(
masters = Seq(TLMasterParameters.v2(
name = "TLFragmenter",
sourceId = IdRange(0, if (minSize == maxSize) c.endSourceId else (c.endSourceId << addedBits)),
requestFifo = true,
emits = TLMasterToSlaveTransferSizes(
acquireT = shrinkTransfer(c.masters.map(_.emits.acquireT) .reduce(_ mincover _)),
acquireB = shrinkTransfer(c.masters.map(_.emits.acquireB) .reduce(_ mincover _)),
arithmetic = shrinkTransfer(c.masters.map(_.emits.arithmetic).reduce(_ mincover _)),
logical = shrinkTransfer(c.masters.map(_.emits.logical) .reduce(_ mincover _)),
get = shrinkTransfer(c.masters.map(_.emits.get) .reduce(_ mincover _)),
putFull = shrinkTransfer(c.masters.map(_.emits.putFull) .reduce(_ mincover _)),
putPartial = shrinkTransfer(c.masters.map(_.emits.putPartial).reduce(_ mincover _)),
hint = shrinkTransfer(c.masters.map(_.emits.hint) .reduce(_ mincover _))
)
))
))},
managerFn = { m => if (noChangeRequired) m else m.v2copy(slaves = m.slaves.map(mapManager)) }
) {
override def circuitIdentity = noChangeRequired
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
override def desiredName = (Seq("TLFragmenter") ++ nameSuffix).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
if (noChangeRequired) {
out <> in
} else {
// All managers must share a common FIFO domain (responses might end up interleaved)
val manager = edgeOut.manager
val managers = manager.managers
val beatBytes = manager.beatBytes
val fifoId = managers(0).fifoId
require (fifoId.isDefined && managers.map(_.fifoId == fifoId).reduce(_ && _))
require (!manager.anySupportAcquireB || !edgeOut.client.anySupportProbe,
s"TLFragmenter (with parent $parent) can't fragment a caching client's requests into a cacheable region")
require (minSize >= beatBytes, s"TLFragmenter (with parent $parent) can't support fragmenting ($minSize) to sub-beat ($beatBytes) accesses")
// We can't support devices which are cached on both sides of us
require (!edgeOut.manager.anySupportAcquireB || !edgeIn.client.anySupportProbe)
// We can't support denied because we reassemble fragments
require (!edgeOut.manager.mayDenyGet || holdFirstDeny, s"TLFragmenter (with parent $parent) can't support denials without holdFirstDeny=true")
require (!edgeOut.manager.mayDenyPut || earlyAck == EarlyAck.None)
/* The Fragmenter is a bit tricky, because there are 5 sizes in play:
* max size -- the maximum transfer size possible
* orig size -- the original pre-fragmenter size
* frag size -- the modified post-fragmenter size
* min size -- the threshold below which frag=orig
* beat size -- the amount transferred on any given beat
*
* The relationships are as follows:
* max >= orig >= frag
* max > min >= beat
* It IS possible that orig <= min (then frag=orig; ie: no fragmentation)
*
* The fragment# (sent via TL.source) is measured in multiples of min size.
* Meanwhile, to track the progress, counters measure in multiples of beat size.
*
* Here is an example of a bus with max=256, min=8, beat=4 and a device supporting 16.
*
* in.A out.A (frag#) out.D (frag#) in.D gen# ack#
* get64 get16 6 ackD16 6 ackD64 12 15
* ackD16 6 ackD64 14
* ackD16 6 ackD64 13
* ackD16 6 ackD64 12
* get16 4 ackD16 4 ackD64 8 11
* ackD16 4 ackD64 10
* ackD16 4 ackD64 9
* ackD16 4 ackD64 8
* get16 2 ackD16 2 ackD64 4 7
* ackD16 2 ackD64 6
* ackD16 2 ackD64 5
* ackD16 2 ackD64 4
* get16 0 ackD16 0 ackD64 0 3
* ackD16 0 ackD64 2
* ackD16 0 ackD64 1
* ackD16 0 ackD64 0
*
* get8 get8 0 ackD8 0 ackD8 0 1
* ackD8 0 ackD8 0
*
* get4 get4 0 ackD4 0 ackD4 0 0
* get1 get1 0 ackD1 0 ackD1 0 0
*
* put64 put16 6 15
* put64 put16 6 14
* put64 put16 6 13
* put64 put16 6 ack16 6 12 12
* put64 put16 4 11
* put64 put16 4 10
* put64 put16 4 9
* put64 put16 4 ack16 4 8 8
* put64 put16 2 7
* put64 put16 2 6
* put64 put16 2 5
* put64 put16 2 ack16 2 4 4
* put64 put16 0 3
* put64 put16 0 2
* put64 put16 0 1
* put64 put16 0 ack16 0 ack64 0 0
*
* put8 put8 0 1
* put8 put8 0 ack8 0 ack8 0 0
*
* put4 put4 0 ack4 0 ack4 0 0
* put1 put1 0 ack1 0 ack1 0 0
*/
val counterBits = log2Up(maxSize/beatBytes)
val maxDownSize = if (alwaysMin) minSize else min(manager.maxTransfer, maxSize)
// Consider the following waveform for two 4-beat bursts:
// ---A----A------------
// -------D-----DDD-DDDD
// Under TL rules, the second A can use the same source as the first A,
// because the source is released for reuse on the first response beat.
//
// However, if we fragment the requests, it looks like this:
// ---3210-3210---------
// -------3-----210-3210
// ... now we've broken the rules because 210 are twice inflight.
//
// This phenomenon means we can have essentially 2*maxSize/minSize-1
// fragmented transactions in flight per original transaction source.
//
// To keep the source unique, we encode the beat counter in the low
// bits of the source. To solve the overlap, we use a toggle bit.
// Whatever toggle bit the D is reassembling, A will use the opposite.
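// Concretely (see the Cat that builds out.a.source below): the outbound source id is laid out as
// { original source, [putFull bit when earlyAck == PutFulls], toggle bit, fragment number },
// where the fragment number counts the minSize-sized chunks still to come after this fragment.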
// First, handle the return path
val acknum = RegInit(0.U(counterBits.W))
val dOrig = Reg(UInt())
val dToggle = RegInit(false.B)
val dFragnum = out.d.bits.source(fragmentBits-1, 0)
val dFirst = acknum === 0.U
val dLast = dFragnum === 0.U // only for AccessAck (!Data)
val dsizeOH = UIntToOH (out.d.bits.size, log2Ceil(maxDownSize)+1)
val dsizeOH1 = UIntToOH1(out.d.bits.size, log2Up(maxDownSize))
val dHasData = edgeOut.hasData(out.d.bits)
// calculate new acknum
val acknum_fragment = dFragnum << log2Ceil(minSize/beatBytes)
val acknum_size = dsizeOH1 >> log2Ceil(beatBytes)
assert (!out.d.valid || (acknum_fragment & acknum_size) === 0.U)
val dFirst_acknum = acknum_fragment | Mux(dHasData, acknum_size, 0.U)
val ack_decrement = Mux(dHasData, 1.U, dsizeOH >> log2Ceil(beatBytes))
// calculate the original size
val dFirst_size = OH1ToUInt((dFragnum << log2Ceil(minSize)) | dsizeOH1)
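// Worked example (minSize = 8): a 64-byte Get fragmented into 16-byte pieces returns its first data
// beat with dFragnum = 6 and dsizeOH1 = 0xF, so (6 << 3) | 0xF = 0x3F and OH1ToUInt(0x3F) = 6,
// i.e. the original lgSize of 6 (64 bytes) is recovered.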
when (out.d.fire) {
acknum := Mux(dFirst, dFirst_acknum, acknum - ack_decrement)
when (dFirst) {
dOrig := dFirst_size
dToggle := out.d.bits.source(fragmentBits)
}
}
// Swallow up non-data ack fragments
val doEarlyAck = earlyAck match {
case EarlyAck.AllPuts => true.B
case EarlyAck.PutFulls => out.d.bits.source(fragmentBits+1)
case EarlyAck.None => false.B
}
val drop = !dHasData && !Mux(doEarlyAck, dFirst, dLast)
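// e.g. for a Put fragmented into several pieces with earlyAck == None, only the AccessAck of
// fragment 0 (the final piece) reaches in.d; with AllPuts (or PutFulls for full puts) the first
// response is forwarded instead and the remaining acks are dropped here.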
out.d.ready := in.d.ready || drop
in.d.valid := out.d.valid && !drop
in.d.bits := out.d.bits // pass most stuff unchanged
in.d.bits.source := out.d.bits.source >> addedBits
in.d.bits.size := Mux(dFirst, dFirst_size, dOrig)
if (edgeOut.manager.mayDenyPut) {
val r_denied = Reg(Bool())
val d_denied = (!dFirst && r_denied) || out.d.bits.denied
when (out.d.fire) { r_denied := d_denied }
in.d.bits.denied := d_denied
}
if (edgeOut.manager.mayDenyGet) {
// Take denied only from the first beat and hold that value
val d_denied = out.d.bits.denied holdUnless dFirst
when (dHasData) {
in.d.bits.denied := d_denied
in.d.bits.corrupt := d_denied || out.d.bits.corrupt
}
}
// What maximum transfer sizes do downstream devices support?
val maxArithmetics = managers.map(_.supportsArithmetic.max)
val maxLogicals = managers.map(_.supportsLogical.max)
val maxGets = managers.map(_.supportsGet.max)
val maxPutFulls = managers.map(_.supportsPutFull.max)
val maxPutPartials = managers.map(_.supportsPutPartial.max)
val maxHints = managers.map(m => if (m.supportsHint) maxDownSize else 0)
// We assume that the request is valid => size 0 is impossible
val lgMinSize = log2Ceil(minSize).U
val maxLgArithmetics = maxArithmetics.map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgLogicals = maxLogicals .map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgGets = maxGets .map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgPutFulls = maxPutFulls .map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgPutPartials = maxPutPartials.map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgHints = maxHints .map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
// Make the request repeatable
val repeater = Module(new Repeater(in.a.bits))
repeater.io.enq <> in.a
val in_a = repeater.io.deq
// If this is in front of a single manager, these become constants
val find = manager.findFast(edgeIn.address(in_a.bits))
val maxLgArithmetic = Mux1H(find, maxLgArithmetics)
val maxLgLogical = Mux1H(find, maxLgLogicals)
val maxLgGet = Mux1H(find, maxLgGets)
val maxLgPutFull = Mux1H(find, maxLgPutFulls)
val maxLgPutPartial = Mux1H(find, maxLgPutPartials)
val maxLgHint = Mux1H(find, maxLgHints)
val limit = if (alwaysMin) lgMinSize else
MuxLookup(in_a.bits.opcode, lgMinSize)(Array(
TLMessages.PutFullData -> maxLgPutFull,
TLMessages.PutPartialData -> maxLgPutPartial,
TLMessages.ArithmeticData -> maxLgArithmetic,
TLMessages.LogicalData -> maxLgLogical,
TLMessages.Get -> maxLgGet,
TLMessages.Hint -> maxLgHint))
val aOrig = in_a.bits.size
val aFrag = Mux(aOrig > limit, limit, aOrig)
val aOrigOH1 = UIntToOH1(aOrig, log2Ceil(maxSize))
val aFragOH1 = UIntToOH1(aFrag, log2Up(maxDownSize))
val aHasData = edgeIn.hasData(in_a.bits)
val aMask = Mux(aHasData, 0.U, aFragOH1)
val gennum = RegInit(0.U(counterBits.W))
val aFirst = gennum === 0.U
val old_gennum1 = Mux(aFirst, aOrigOH1 >> log2Ceil(beatBytes), gennum - 1.U)
val new_gennum = ~(~old_gennum1 | (aMask >> log2Ceil(beatBytes))) // ~(~x|y) is width safe
val aFragnum = ~(~(old_gennum1 >> log2Ceil(minSize/beatBytes)) | (aFragOH1 >> log2Ceil(minSize)))
val aLast = aFragnum === 0.U
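// Matching the example table above (max = 256, min = 8, beat = 4): for a put64, aOrigOH1 = 63,
// aFragOH1 = 15, and the first beat has old_gennum1 = 15, giving aFragnum = 6; as gennum counts
// down the fragment number steps through 6, 4, 2, 0, and aLast marks the beats of the final fragment.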
val aToggle = !Mux(aFirst, dToggle, RegEnable(dToggle, aFirst))
val aFull = if (earlyAck == EarlyAck.PutFulls) Some(in_a.bits.opcode === TLMessages.PutFullData) else None
when (out.a.fire) { gennum := new_gennum }
repeater.io.repeat := !aHasData && aFragnum =/= 0.U
out.a <> in_a
out.a.bits.address := in_a.bits.address | ~(old_gennum1 << log2Ceil(beatBytes) | ~aOrigOH1 | aFragOH1 | (minSize-1).U)
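// The OR above folds in the byte offset of the current fragment within the original transfer: in the
// put64 example the four 16-byte fragments land at in.address + 0, 16, 32 and 48 respectively
// (all beats of a given fragment carry the same address, as TileLink requires).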
out.a.bits.source := Cat(Seq(in_a.bits.source) ++ aFull ++ Seq(aToggle.asUInt, aFragnum))
out.a.bits.size := aFrag
// Optimize away some of the Repeater's registers
assert (!repeater.io.full || !aHasData)
out.a.bits.data := in.a.bits.data
val fullMask = ((BigInt(1) << beatBytes) - 1).U
assert (!repeater.io.full || in_a.bits.mask === fullMask)
out.a.bits.mask := Mux(repeater.io.full, fullMask, in.a.bits.mask)
out.a.bits.user.waiveAll :<= in.a.bits.user.subset(_.isData)
// Tie off unused channels
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLFragmenter
{
def apply(minSize: Int, maxSize: Int, alwaysMin: Boolean = false, earlyAck: EarlyAck.T = EarlyAck.None, holdFirstDeny: Boolean = false, nameSuffix: Option[String] = None)(implicit p: Parameters): TLNode =
{
if (minSize <= maxSize) {
val fragmenter = LazyModule(new TLFragmenter(minSize, maxSize, alwaysMin, earlyAck, holdFirstDeny, nameSuffix))
fragmenter.node
} else { TLEphemeralNode()(ValName("no_fragmenter")) }
}
def apply(wrapper: TLBusWrapper, nameSuffix: Option[String])(implicit p: Parameters): TLNode = apply(wrapper.beatBytes, wrapper.blockBytes, nameSuffix = nameSuffix)
def apply(wrapper: TLBusWrapper)(implicit p: Parameters): TLNode = apply(wrapper, None)
}
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class TLRAMFragmenter(ramBeatBytes: Int, maxSize: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val fuzz = LazyModule(new TLFuzzer(txns))
val model = LazyModule(new TLRAMModel("Fragmenter"))
val ram = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff), beatBytes = ramBeatBytes))
(ram.node
:= TLDelayer(0.1)
:= TLBuffer(BufferParams.flow)
:= TLDelayer(0.1)
:= TLFragmenter(ramBeatBytes, maxSize, earlyAck = EarlyAck.AllPuts)
:= TLDelayer(0.1)
:= TLBuffer(BufferParams.flow)
:= TLFragmenter(ramBeatBytes, maxSize/2)
:= TLDelayer(0.1)
:= TLBuffer(BufferParams.flow)
:= model.node
:= fuzz.node)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzz.module.io.finished
}
}
class TLRAMFragmenterTest(ramBeatBytes: Int, maxSize: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLRAMFragmenter(ramBeatBytes,maxSize,txns)).module)
io.finished := dut.io.finished
dut.io.start := io.start
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
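// e.g. Seq(a, b, c, d).rotate(1) == Seq(b, c, d, a) and Seq(a, b, c, d).rotateRight(1) == Seq(d, a, b, c);
// the UInt-shift variants build the same result out of log2(size) stages of 2:1 muxes.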
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
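// i.e. pass x through combinationally while enable is asserted, otherwise replay the most recently
// enabled value (used by the Fragmenter above to hold `denied` from the first response beat).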
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
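// e.g. 5.U.addWrap(4.U, 7) == 2.U (9 wraps past 7) and 1.U.subWrap(3.U, 7) == 5.U;
// for power-of-two n the wrap is just a truncation to log2(n) bits.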
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
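// "OH1" is a one-hot-minus-one encoding: UIntToOH1(n, w) == ((1 << n) - 1) truncated to w bits,
// e.g. UIntToOH1(3.U, 6) == 0b000111, and OH1ToUInt maps 0b000111 back to 3.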
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
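// e.g. leftOR("b00100".U(5.W)) == 0b11100 (each set bit smears towards the MSB) and
// rightOR("b00100".U(5.W)) == 0b00111 (towards the LSB), computed in log2(width) OR stages.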
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
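// e.g. groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2) == Seq(1 -> Seq(1, 3), 0 -> Seq(2, 4));
// keys appear in the order they are first encountered, unlike a Map-based groupBy.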
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** Instantiate this [[LazyModule]], returning its [[AutoBundle]] and the unconnected [[Dangle]]s from this
* module and its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
| module TLFragmenter( // @[Fragmenter.scala:92:9]
input clock, // @[Fragmenter.scala:92:9]
input reset, // @[Fragmenter.scala:92:9]
output auto_anon_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [6:0] auto_anon_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [14:0] auto_anon_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_a_bits_user_amba_prot_bufferable, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_a_bits_user_amba_prot_modifiable, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_a_bits_user_amba_prot_readalloc, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_a_bits_user_amba_prot_writealloc, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_a_bits_user_amba_prot_privileged, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_a_bits_user_amba_prot_secure, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_a_bits_user_amba_prot_fetch, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_anon_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_anon_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [6:0] auto_anon_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_anon_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_anon_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [10:0] auto_anon_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [14:0] auto_anon_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_a_bits_user_amba_prot_bufferable, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_a_bits_user_amba_prot_modifiable, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_a_bits_user_amba_prot_readalloc, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_a_bits_user_amba_prot_writealloc, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_a_bits_user_amba_prot_privileged, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_a_bits_user_amba_prot_secure, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_a_bits_user_amba_prot_fetch, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_anon_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_anon_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_anon_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [10:0] auto_anon_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_anon_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_d_bits_corrupt // @[LazyModuleImp.scala:107:25]
);
wire _repeater_io_full; // @[Fragmenter.scala:274:30]
wire _repeater_io_enq_ready; // @[Fragmenter.scala:274:30]
wire _repeater_io_deq_valid; // @[Fragmenter.scala:274:30]
wire [2:0] _repeater_io_deq_bits_opcode; // @[Fragmenter.scala:274:30]
wire [2:0] _repeater_io_deq_bits_size; // @[Fragmenter.scala:274:30]
wire [6:0] _repeater_io_deq_bits_source; // @[Fragmenter.scala:274:30]
wire [14:0] _repeater_io_deq_bits_address; // @[Fragmenter.scala:274:30]
wire [7:0] _repeater_io_deq_bits_mask; // @[Fragmenter.scala:274:30]
reg [2:0] acknum; // @[Fragmenter.scala:201:29]
reg [2:0] dOrig; // @[Fragmenter.scala:202:24]
reg dToggle; // @[Fragmenter.scala:203:30]
wire [5:0] _dsizeOH1_T = 6'h7 << auto_anon_out_d_bits_size; // @[package.scala:243:71]
wire [2:0] _GEN = ~(auto_anon_out_d_bits_source[2:0]); // @[package.scala:241:49]
wire [2:0] dFirst_size_hi = auto_anon_out_d_bits_source[2:0] & {1'h1, _GEN[2:1]}; // @[OneHot.scala:30:18]
wire [2:0] _dFirst_size_T_8 = {1'h0, dFirst_size_hi[2:1]} | ~(_dsizeOH1_T[2:0]) & {_GEN[0], _dsizeOH1_T[2:1]}; // @[OneHot.scala:30:18, :31:18, :32:28]
wire [2:0] dFirst_size = {|dFirst_size_hi, |(_dFirst_size_T_8[2:1]), _dFirst_size_T_8[2] | _dFirst_size_T_8[0]}; // @[OneHot.scala:30:18, :31:18, :32:{10,14,28}]
wire drop = ~(auto_anon_out_d_bits_opcode[0]) & (|(auto_anon_out_d_bits_source[2:0])); // @[Fragmenter.scala:204:41, :206:30, :234:{20,30}]
wire anonOut_d_ready = auto_anon_in_d_ready | drop; // @[Fragmenter.scala:234:30, :235:35]
wire anonIn_d_valid = auto_anon_out_d_valid & ~drop; // @[Fragmenter.scala:234:30, :236:{36,39}]
wire [2:0] anonIn_d_bits_size = (|acknum) ? dOrig : dFirst_size; // @[OneHot.scala:32:10]
reg r_denied; // @[Fragmenter.scala:242:29]
wire d_denied = (|acknum) & r_denied | auto_anon_out_d_bits_denied; // @[Fragmenter.scala:201:29, :205:29, :242:29, :243:{35,48}]
reg d_denied_r; // @[package.scala:88:63]
wire d_denied_1 = (|acknum) ? d_denied_r : auto_anon_out_d_bits_denied; // @[package.scala:88:{42,63}]
wire anonIn_d_bits_denied = auto_anon_out_d_bits_opcode[0] ? d_denied_1 : d_denied; // @[package.scala:88:42]
wire anonIn_d_bits_corrupt = auto_anon_out_d_bits_opcode[0] & d_denied_1 | auto_anon_out_d_bits_corrupt; // @[package.scala:88:42]
wire [12:0] _aOrigOH1_T = 13'h3F << _repeater_io_deq_bits_size; // @[package.scala:243:71]
reg [2:0] gennum; // @[Fragmenter.scala:303:29]
wire aFirst = gennum == 3'h0; // @[Fragmenter.scala:303:29, :304:29]
wire [2:0] aFragnum = aFirst ? ~(_aOrigOH1_T[5:3]) : gennum - 3'h1; // @[package.scala:243:{46,71,76}]
reg aToggle_r; // @[Fragmenter.scala:309:54] |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
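// Note: in plain Monitor mode both helpers are simulation asserts; when a formal direction is set,
// monAssert emits the property in monitorDir while assume emits it with the direction flipped --
// assume() is used below for B-channel checks, i.e. messages driven by the manager side.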
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
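// i.e. the address must fall within the declared visibility of every client whose sourceId range
// contains this source id.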
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn't check for acquire T vs acquire B; it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
      monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
      assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
      monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
      monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
    // Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
      monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
      assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
      assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
  // This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
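    // Illustrative note (not part of the original source): the "+1" bus sizes above give a
    // shifted-by-one encoding, so a tracked field k is stored as (k << 1) | 1 and an
    // all-zero entry unambiguously means "nothing in flight". The *_lookup wires below
    // strip the valid bit again with ">> 1.U". For a hypothetical 3-bit opcode k = 4:
    //   stored    = (4 << 1) | 1 = 9
    //   recovered = (9 & size_to_numfullbits(4)) >> 1 = 4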
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
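    // Sketch of the lookup (assuming the standard TL A-channel opcode encoding): the two
    // vectors above are indexed by the request opcode and give the D-channel opcode(s) a
    // legal response may use, e.g. Get -> AccessAckData, PutFullData -> AccessAck, and
    // AcquireBlock -> Grant or GrantData (the second-option vector).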
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
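// Usage sketch (illustrative, not from the original source): AND several ready/valid terms
// together and derive each side's handshake by excluding its own term, e.g.
//   val helper = DecoupledHelper(in.valid, out.ready, !busy)
//   out.valid := helper.fire(out.ready) // all terms except out.ready itself
//   in.ready  := helper.fire(in.valid)  // all terms except in.valid itself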
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
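// Hypothetical usage sketch (names are illustrative): look up a pair of control signals by
// opcode, falling back to a default when no key matches.
//   val (wen, lgSize) = MuxTLookup(opcode, (false.B, 0.U), Seq(
//     1.U -> (true.B, 2.U),
//     2.U -> (true.B, 3.U)))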
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
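// Example (illustrative): Str("ab") packs the ASCII bytes into 0x6162.U(16.W), while
// Str(x: UInt) renders x as decimal digit characters, handy for printf-style debug output.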
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
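// Usage sketch (assumption): pick a replacement victim among a non-power-of-two number of
// ways, using the built-in LFSR when no entropy source is supplied.
//   val victim   = Random(6)          // UInt in 0..5
//   val victimOH = Random.oneHot(6)   // one-hot over 6 positions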
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
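// Example (illustrative): Majority(Seq(a, b, c)) is true when at least two of the three
// inputs are true, i.e. (a && b) || (a && c) || (b && c).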
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
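// Usage sketch (assumption): PopCountAtLeast(valids.asUInt, 2) is true when at least two
// bits of valids are set; for thresholds of 0..2 it avoids building a full PopCount tree.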
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
  * initial block and thus accessing it from another initial is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
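  // Usage sketch (hypothetical plusarg name): kill simulation once a wait counter exceeds
  // the +deadlock_timeout value; the default of 0 disables the check entirely.
  //   PlusArg.timeout("deadlock_timeout", docstring = "cycles before declaring deadlock")(waitCounter)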
}
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
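  // Usage sketch (assumption): mem.readAndHold(addr, ren) reads like mem.read(addr, ren) but
  // keeps presenting the last read data on cycles where ren is deasserted.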
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
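    // Illustrative example: modular pointer arithmetic for a 6-entry queue, assuming both
    // operands are already less than 6.
    //   head.addWrap(1.U, 6) // 5 wraps to 0
    //   head.subWrap(1.U, 6) // 0 wraps to 5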
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
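  // Worked example (illustrative): for x = "b00100".U(5.W),
  //   leftOR(x)  = "b11100".U   (each set bit smears toward the MSB)
  //   rightOR(x) = "b00111".U   (each set bit smears toward the LSB)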
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
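  // Illustrative use (hypothetical values): unlike Seq.groupBy, the insertion order of keys is
  // preserved, so generated hardware is identical from run to run.
  //   groupByIntoSeq(Seq("ab", "cd", "a"))(_.length)  // Seq(2 -> List("ab", "cd"), 1 -> List("a"))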
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on inwards path to) the global point of serialization.
 *   (B)ranch: the agent is on an outwards path from the Trunk and may hold a read-only copy.
 *   (N)one: the agent holds no permissions on the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
  // Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
  def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
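// Illustrative sketch (hypothetical signal names): a client picks a grow parameter for an
// Acquire based on whether it needs write permission, and a manager caps the Grant accordingly.
//   val growParam = Mux(wantWrite,  TLPermissions.NtoT, TLPermissions.NtoB)
//   val capParam  = Mux(grantWrite, TLPermissions.toT,  TLPermissions.toB)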
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
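// Illustrative consequence of the ordering above: CACHED is the most relaxed region type, so
// for example (RegionType.CACHED > RegionType.GET_EFFECTS) evaluates to true.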
// A potentially empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
  require (start <= end, "Id range end cannot be less than start.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
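  // Worked example (illustrative): for IdRange(4, 8), start ^ (end-1) = 4 ^ 7 = 3, so
  // smallestCommonBit = 2 and the hardware reduces to the prefix check (x >> 2) === 1.U;
  // the two comparisons on the low bits become 0 <= x(1,0) <= 3 and constant-fold away.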
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
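// Worked example (illustrative), matching the comment above: for AddressSet(0x1000, 0xf0f),
//   contains(0x1104) == true    // 0x1104 ^ 0x1000 = 0x104, and both set bits fall inside the mask
//   contains(0x1010) == false   // bit 4 differs from the base and is not covered by the mask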
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
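  // Illustrative examples: enumerateBits(0xa)  == Seq(0x2, 0x8)
  //                        enumerateMask(0xa) == Seq(0x0, 0x2, 0x8, 0xa)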
}
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
      // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
      // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
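  // Worked example (illustrative): with beatBytes = 8, a PutFullData of size = 5 (32 bytes)
  // takes 4 beats, while a Get of any size is a single, data-less beat on channel A.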
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
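  // Illustrative timing for a 4-beat message (beats1 = 3): first is high on beat 0, last on
  // beat 3, done pulses with the final fire, and count steps 0, 1, 2, 3 across the beats.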
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
module TLMonitor_31( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [11:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [7:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [11:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [7:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59]
wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14]
wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27]
wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25]
wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_27 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_29 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_33 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_35 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_51 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_53 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_57 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_59 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_63 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_65 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_69 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_71 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_75 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_77 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_81 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_83 = 1'h1; // @[Parameters.scala:57:20]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28]
wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28]
wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [11:0] _c_first_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _c_first_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _c_first_WIRE_2_bits_address = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _c_first_WIRE_3_bits_address = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _c_set_wo_ready_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _c_set_wo_ready_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _c_set_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _c_set_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _c_opcodes_set_interm_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _c_opcodes_set_interm_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _c_sizes_set_interm_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _c_sizes_set_interm_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _c_opcodes_set_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _c_opcodes_set_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _c_sizes_set_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _c_sizes_set_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _c_probe_ack_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _c_probe_ack_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _c_probe_ack_WIRE_2_bits_address = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _c_probe_ack_WIRE_3_bits_address = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _same_cycle_resp_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _same_cycle_resp_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _same_cycle_resp_WIRE_2_bits_address = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _same_cycle_resp_WIRE_3_bits_address = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _same_cycle_resp_WIRE_4_bits_address = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _same_cycle_resp_WIRE_5_bits_address = 12'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_first_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_first_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_first_WIRE_2_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_first_WIRE_3_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_set_wo_ready_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_set_wo_ready_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_set_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_set_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_opcodes_set_interm_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_opcodes_set_interm_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_sizes_set_interm_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_sizes_set_interm_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_opcodes_set_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_opcodes_set_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_sizes_set_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_sizes_set_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_probe_ack_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_probe_ack_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_probe_ack_WIRE_2_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_probe_ack_WIRE_3_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _same_cycle_resp_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _same_cycle_resp_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _same_cycle_resp_WIRE_2_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _same_cycle_resp_WIRE_3_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _same_cycle_resp_WIRE_4_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _same_cycle_resp_WIRE_5_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [2050:0] _c_opcodes_set_T_1 = 2051'h0; // @[Monitor.scala:767:54]
wire [2050:0] _c_sizes_set_T_1 = 2051'h0; // @[Monitor.scala:768:52]
wire [10:0] _c_opcodes_set_T = 11'h0; // @[Monitor.scala:767:79]
wire [10:0] _c_sizes_set_T = 11'h0; // @[Monitor.scala:768:77]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51]
wire [255:0] _c_set_wo_ready_T = 256'h1; // @[OneHot.scala:58:35]
wire [255:0] _c_set_T = 256'h1; // @[OneHot.scala:58:35]
wire [515:0] c_opcodes_set = 516'h0; // @[Monitor.scala:740:34]
wire [515:0] c_sizes_set = 516'h0; // @[Monitor.scala:741:34]
wire [128:0] c_set = 129'h0; // @[Monitor.scala:738:34]
wire [128:0] c_set_wo_ready = 129'h0; // @[Monitor.scala:739:34]
wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46]
wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76]
wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
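// End of the constant tie-off region. From here on the wires carry live combinational
// logic: the A-channel size and source fields are fanned out to the legality, alignment
// and byte-mask computations below.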
wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [7:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_44 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_45 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_46 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_47 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_48 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_49 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_50 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_51 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_52 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_53 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_8 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_9 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_10 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_11 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
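// A-channel source legality: the request source must fall in one of the id ranges owned
// by the clients on this link, checked as a mix of exact matches (8'h20, 8'h40, 8'h41,
// 8'h80) and aligned ranges compared on the upper source bits.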
wire _source_ok_T = io_in_a_bits_source_0 == 8'h20; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31]
wire [2:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[2:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_1 = io_in_a_bits_source_0[7:3]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_7 = io_in_a_bits_source_0[7:3]; // @[Monitor.scala:36:7]
wire _source_ok_T_2 = _source_ok_T_1 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31]
wire [2:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[2:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_8 = _source_ok_T_7 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire [5:0] _source_ok_T_13 = io_in_a_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_T_19 = io_in_a_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_T_25 = io_in_a_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_T_31 = io_in_a_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_14 = _source_ok_T_13 == 6'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_20 = _source_ok_T_19 == 6'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_26 = _source_ok_T_25 == 6'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_28 = _source_ok_T_26; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_30 = _source_ok_T_28; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_5 = _source_ok_T_30; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_32 = _source_ok_T_31 == 6'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_34 = _source_ok_T_32; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_36 = _source_ok_T_34; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_6 = _source_ok_T_36; // @[Parameters.scala:1138:31]
wire _source_ok_T_37 = io_in_a_bits_source_0 == 8'h41; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_7 = _source_ok_T_37; // @[Parameters.scala:1138:31]
wire _source_ok_T_38 = io_in_a_bits_source_0 == 8'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_8 = _source_ok_T_38; // @[Parameters.scala:1138:31]
wire _source_ok_T_39 = io_in_a_bits_source_0 == 8'h80; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_9 = _source_ok_T_39; // @[Parameters.scala:1138:31]
wire _source_ok_T_40 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_41 = _source_ok_T_40 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_42 = _source_ok_T_41 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_43 = _source_ok_T_42 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_44 = _source_ok_T_43 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_45 = _source_ok_T_44 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_46 = _source_ok_T_45 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_47 = _source_ok_T_46 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok = _source_ok_T_47 | _source_ok_WIRE_9; // @[Parameters.scala:1138:31, :1139:46]
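// Address alignment check: is_aligned_mask has the low `size` address bits set
// (~(6'h3F << size)), and the request is aligned when those address bits are all zero.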
wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [11:0] _is_aligned_T = {6'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 12'h0; // @[Edges.scala:21:{16,24}]
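// Byte-lane mask generation: starting from the one-hot transfer size, each level of the
// tree halves the span and uses one address bit to select the active half, yielding the
// 8-bit byte mask expected on this beat.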
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
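// The uncommonBits_* wires strip the range-selecting upper bits from the source id,
// presumably for the per-range checks emitted elsewhere in the monitor.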
wire [2:0] uncommonBits = _uncommonBits_T[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_1 = _uncommonBits_T_1[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_4 = _uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_6 = _uncommonBits_T_6[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_7 = _uncommonBits_T_7[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_9 = _uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_12 = _uncommonBits_T_12[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_13 = _uncommonBits_T_13[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_14 = _uncommonBits_T_14[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_18 = _uncommonBits_T_18[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_19 = _uncommonBits_T_19[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_24 = _uncommonBits_T_24[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_25 = _uncommonBits_T_25[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_29 = _uncommonBits_T_29[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_30 = _uncommonBits_T_30[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_31 = _uncommonBits_T_31[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_34 = _uncommonBits_T_34[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_36 = _uncommonBits_T_36[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_37 = _uncommonBits_T_37[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_39 = _uncommonBits_T_39[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_40 = _uncommonBits_T_40[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_41 = _uncommonBits_T_41[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_42 = _uncommonBits_T_42[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_43 = _uncommonBits_T_43[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_44 = _uncommonBits_T_44[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_45 = _uncommonBits_T_45[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_46 = _uncommonBits_T_46[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_47 = _uncommonBits_T_47[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_48 = _uncommonBits_T_48[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_49 = _uncommonBits_T_49[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_50 = _uncommonBits_T_50[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_51 = _uncommonBits_T_51[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_52 = _uncommonBits_T_52[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_53 = _uncommonBits_T_53[1:0]; // @[Parameters.scala:52:{29,56}]
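// The same source legality decode is repeated for the D channel: a response may only
// carry a source id that a client on this link could have issued.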
wire _source_ok_T_48 = io_in_d_bits_source_0 == 8'h20; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_0 = _source_ok_T_48; // @[Parameters.scala:1138:31]
wire [2:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[2:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_49 = io_in_d_bits_source_0[7:3]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_55 = io_in_d_bits_source_0[7:3]; // @[Monitor.scala:36:7]
wire _source_ok_T_50 = _source_ok_T_49 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_52 = _source_ok_T_50; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_54 = _source_ok_T_52; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_1 = _source_ok_T_54; // @[Parameters.scala:1138:31]
wire [2:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[2:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_56 = _source_ok_T_55 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_58 = _source_ok_T_56; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_60 = _source_ok_T_58; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_2 = _source_ok_T_60; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_8 = _source_ok_uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire [5:0] _source_ok_T_61 = io_in_d_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_T_67 = io_in_d_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_T_73 = io_in_d_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_T_79 = io_in_d_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_62 = _source_ok_T_61 == 6'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_64 = _source_ok_T_62; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_66 = _source_ok_T_64; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_3 = _source_ok_T_66; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_9 = _source_ok_uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_68 = _source_ok_T_67 == 6'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_70 = _source_ok_T_68; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_72 = _source_ok_T_70; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_4 = _source_ok_T_72; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_10 = _source_ok_uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_74 = _source_ok_T_73 == 6'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_76 = _source_ok_T_74; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_78 = _source_ok_T_76; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_5 = _source_ok_T_78; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_11 = _source_ok_uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_80 = _source_ok_T_79 == 6'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_82 = _source_ok_T_80; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_84 = _source_ok_T_82; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_6 = _source_ok_T_84; // @[Parameters.scala:1138:31]
wire _source_ok_T_85 = io_in_d_bits_source_0 == 8'h41; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_7 = _source_ok_T_85; // @[Parameters.scala:1138:31]
wire _source_ok_T_86 = io_in_d_bits_source_0 == 8'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_8 = _source_ok_T_86; // @[Parameters.scala:1138:31]
wire _source_ok_T_87 = io_in_d_bits_source_0 == 8'h80; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_9 = _source_ok_T_87; // @[Parameters.scala:1138:31]
wire _source_ok_T_88 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_89 = _source_ok_T_88 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_90 = _source_ok_T_89 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_91 = _source_ok_T_90 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_92 = _source_ok_T_91 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_93 = _source_ok_T_92 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_94 = _source_ok_T_93 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_95 = _source_ok_T_94 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok_1 = _source_ok_T_95 | _source_ok_WIRE_1_9; // @[Parameters.scala:1138:31, :1139:46]
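// A-channel beat tracking: a_first_counter counts down the remaining beats of a burst so
// per-request checks fire only on the first beat; the opcode/param/size/source/address
// registers latch that first beat for comparison against the rest of the burst.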
wire _T_1115 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_1115; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_1115; // @[Decoupled.scala:51:35]
wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [7:0] source; // @[Monitor.scala:390:22]
reg [11:0] address; // @[Monitor.scala:391:22]
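// D-channel beat tracking mirrors the A side, deriving the beat count from the response
// size and the data bit of the opcode; opcode_1/size_1/source_1 latch the first beat.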
wire _T_1183 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_1183; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_1183; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_1183; // @[Decoupled.scala:51:35]
wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [2:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [7:0] source_1; // @[Monitor.scala:541:22]
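// Outstanding-transaction bookkeeping: `inflight` holds one bit per possible source id
// (129 ids here), while `inflight_opcodes`/`inflight_sizes` pack four bits of saved
// opcode and size per id, so each D response can be checked against the A request that
// opened it.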
reg [128:0] inflight; // @[Monitor.scala:614:27]
reg [515:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [515:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
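// Per-cycle set/clear vectors: a_set marks the source of an A request accepted this
// cycle, and the shifted lookups below recover the saved opcode/size for the source of
// the current D response.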
wire [128:0] a_set; // @[Monitor.scala:626:34]
wire [128:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [515:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [515:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [10:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [10:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [10:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [10:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [10:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [10:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [10:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [10:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [10:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [515:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [515:0] _a_opcode_lookup_T_6 = {512'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [515:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[515:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [515:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [515:0] _a_size_lookup_T_6 = {512'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}]
wire [515:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[515:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [255:0] _GEN_2 = 256'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [255:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35]
wire [255:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[128:0] : 129'h0; // @[OneHot.scala:58:35]
wire _T_1048 = _T_1115 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_1048 ? _a_set_T[128:0] : 129'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_1048 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_1048 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [10:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [10:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79]
wire [10:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77]
wire [2050:0] _a_opcodes_set_T_1 = {2047'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_1048 ? _a_opcodes_set_T_1[515:0] : 516'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [2050:0] _a_sizes_set_T_1 = {2047'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_1048 ? _a_sizes_set_T_1[515:0] : 516'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [128:0] d_clr; // @[Monitor.scala:664:34]
wire [128:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [515:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [515:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_1094 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [255:0] _GEN_5 = 256'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [255:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [255:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [255:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [255:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_1094 & ~d_release_ack ? _d_clr_wo_ready_T[128:0] : 129'h0; // @[OneHot.scala:58:35]
wire _T_1063 = _T_1183 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_1063 ? _d_clr_T[128:0] : 129'h0; // @[OneHot.scala:58:35]
wire [2062:0] _d_opcodes_clr_T_5 = 2063'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_1063 ? _d_opcodes_clr_T_5[515:0] : 516'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [2062:0] _d_sizes_clr_T_5 = 2063'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_1063 ? _d_sizes_clr_T_5[515:0] : 516'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [128:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [128:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [128:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [515:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [515:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [515:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [515:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [515:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [515:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [128:0] inflight_1; // @[Monitor.scala:726:35]
wire [128:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [515:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [515:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [515:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [515:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [515:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [515:0] _c_opcode_lookup_T_6 = {512'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [515:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[515:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [515:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [515:0] _c_size_lookup_T_6 = {512'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}]
wire [515:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[515:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [128:0] d_clr_1; // @[Monitor.scala:774:34]
wire [128:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [515:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [515:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_1159 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_1159 & d_release_ack_1 ? _d_clr_wo_ready_T_1[128:0] : 129'h0; // @[OneHot.scala:58:35]
wire _T_1141 = _T_1183 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_1141 ? _d_clr_T_1[128:0] : 129'h0; // @[OneHot.scala:58:35]
wire [2062:0] _d_opcodes_clr_T_11 = 2063'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_1141 ? _d_opcodes_clr_T_11[515:0] : 516'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [2062:0] _d_sizes_clr_T_11 = 2063'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_1141 ? _d_sizes_clr_T_11[515:0] : 516'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 8'h0; // @[Monitor.scala:36:7, :795:113]
wire [128:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [128:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [515:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [515:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [515:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [515:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File Repeater.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{Decoupled, DecoupledIO}
// A Repeater passes its input to its output, unless repeat is asserted.
// When repeat is asserted, the Repeater copies the input and repeats it next cycle.
class Repeater[T <: Data](gen: T) extends Module
{
override def desiredName = s"Repeater_${gen.typeName}"
val io = IO( new Bundle {
val repeat = Input(Bool())
val full = Output(Bool())
val enq = Flipped(Decoupled(gen.cloneType))
val deq = Decoupled(gen.cloneType)
} )
val full = RegInit(false.B)
val saved = Reg(gen.cloneType)
// When !full, a repeater is pass-through
io.deq.valid := io.enq.valid || full
io.enq.ready := io.deq.ready && !full
io.deq.bits := Mux(full, saved, io.enq.bits)
io.full := full
when (io.enq.fire && io.repeat) { full := true.B; saved := io.enq.bits }
when (io.deq.fire && !io.repeat) { full := false.B }
}
object Repeater
{
def apply[T <: Data](enq: DecoupledIO[T], repeat: Bool): DecoupledIO[T] = {
val repeater = Module(new Repeater(chiselTypeOf(enq.bits)))
repeater.io.repeat := repeat
repeater.io.enq <> enq
repeater.io.deq
}
}
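// Usage sketch (illustrative, not part of the original file): hold the beat
// currently being accepted so it can be replayed while `stall` (an assumed
// control signal) stays high.
class RepeaterExample extends Module {
  val io = IO(new Bundle {
    val in = Flipped(Decoupled(UInt(8.W)))
    val stall = Input(Bool())
    val out = Decoupled(UInt(8.W))
  })
  // If stall is asserted when a beat is accepted, the Repeater saves it and
  // keeps re-presenting it on deq; it is released once stall drops and deq fires.
  io.out <> Repeater(io.in, io.stall)
}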
| module Repeater_TLBundleA_a21d64s10k1z3u( // @[Repeater.scala:10:7]
input clock, // @[Repeater.scala:10:7]
input reset, // @[Repeater.scala:10:7]
input io_repeat, // @[Repeater.scala:13:14]
output io_full, // @[Repeater.scala:13:14]
output io_enq_ready, // @[Repeater.scala:13:14]
input io_enq_valid, // @[Repeater.scala:13:14]
input [2:0] io_enq_bits_opcode, // @[Repeater.scala:13:14]
input [2:0] io_enq_bits_param, // @[Repeater.scala:13:14]
input [2:0] io_enq_bits_size, // @[Repeater.scala:13:14]
input [9:0] io_enq_bits_source, // @[Repeater.scala:13:14]
input [20:0] io_enq_bits_address, // @[Repeater.scala:13:14]
input [7:0] io_enq_bits_mask, // @[Repeater.scala:13:14]
input io_enq_bits_corrupt, // @[Repeater.scala:13:14]
input io_deq_ready, // @[Repeater.scala:13:14]
output io_deq_valid, // @[Repeater.scala:13:14]
output [2:0] io_deq_bits_opcode, // @[Repeater.scala:13:14]
output [2:0] io_deq_bits_param, // @[Repeater.scala:13:14]
output [2:0] io_deq_bits_size, // @[Repeater.scala:13:14]
output [9:0] io_deq_bits_source, // @[Repeater.scala:13:14]
output [20:0] io_deq_bits_address, // @[Repeater.scala:13:14]
output [7:0] io_deq_bits_mask, // @[Repeater.scala:13:14]
output io_deq_bits_corrupt // @[Repeater.scala:13:14]
);
reg full; // @[Repeater.scala:20:21]
reg [2:0] saved_opcode; // @[Repeater.scala:21:18]
reg [2:0] saved_param; // @[Repeater.scala:21:18]
reg [2:0] saved_size; // @[Repeater.scala:21:18]
reg [9:0] saved_source; // @[Repeater.scala:21:18]
reg [20:0] saved_address; // @[Repeater.scala:21:18]
reg [7:0] saved_mask; // @[Repeater.scala:21:18]
reg saved_corrupt; // @[Repeater.scala:21:18]
wire io_deq_valid_0 = io_enq_valid | full; // @[Repeater.scala:20:21, :24:32]
wire io_enq_ready_0 = io_deq_ready & ~full; // @[Repeater.scala:20:21, :25:{32,35}]
wire _GEN = io_enq_ready_0 & io_enq_valid & io_repeat; // @[Decoupled.scala:51:35]
always @(posedge clock) begin // @[Repeater.scala:10:7]
if (reset) // @[Repeater.scala:10:7]
full <= 1'h0; // @[Repeater.scala:20:21]
else // @[Repeater.scala:10:7]
full <= ~(io_deq_ready & io_deq_valid_0 & ~io_repeat) & (_GEN | full); // @[Decoupled.scala:51:35]
if (_GEN) begin // @[Decoupled.scala:51:35]
saved_opcode <= io_enq_bits_opcode; // @[Repeater.scala:21:18]
saved_param <= io_enq_bits_param; // @[Repeater.scala:21:18]
saved_size <= io_enq_bits_size; // @[Repeater.scala:21:18]
saved_source <= io_enq_bits_source; // @[Repeater.scala:21:18]
saved_address <= io_enq_bits_address; // @[Repeater.scala:21:18]
saved_mask <= io_enq_bits_mask; // @[Repeater.scala:21:18]
saved_corrupt <= io_enq_bits_corrupt; // @[Repeater.scala:21:18]
end
  end // always @(posedge)
  assign io_full = full;
  assign io_enq_ready = io_enq_ready_0;
  assign io_deq_valid = io_deq_valid_0;
  assign io_deq_bits_opcode = full ? saved_opcode : io_enq_bits_opcode;
  assign io_deq_bits_param = full ? saved_param : io_enq_bits_param;
  assign io_deq_bits_size = full ? saved_size : io_enq_bits_size;
  assign io_deq_bits_source = full ? saved_source : io_enq_bits_source;
  assign io_deq_bits_address = full ? saved_address : io_enq_bits_address;
  assign io_deq_bits_mask = full ? saved_mask : io_enq_bits_mask;
  assign io_deq_bits_corrupt = full ? saved_corrupt : io_enq_bits_corrupt;
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File MSHR.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import freechips.rocketchip.tilelink._
import TLPermissions._
import TLMessages._
import MetaData._
import chisel3.PrintableHelper
import chisel3.experimental.dataview._
class ScheduleRequest(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val a = Valid(new SourceARequest(params))
val b = Valid(new SourceBRequest(params))
val c = Valid(new SourceCRequest(params))
val d = Valid(new SourceDRequest(params))
val e = Valid(new SourceERequest(params))
val x = Valid(new SourceXRequest(params))
val dir = Valid(new DirectoryWrite(params))
val reload = Bool() // get next request via allocate (if any)
}
class MSHRStatus(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val set = UInt(params.setBits.W)
val tag = UInt(params.tagBits.W)
val way = UInt(params.wayBits.W)
val blockB = Bool()
val nestB = Bool()
val blockC = Bool()
val nestC = Bool()
}
class NestedWriteback(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val set = UInt(params.setBits.W)
val tag = UInt(params.tagBits.W)
val b_toN = Bool() // nested Probes may unhit us
val b_toB = Bool() // nested Probes may demote us
val b_clr_dirty = Bool() // nested Probes clear dirty
val c_set_dirty = Bool() // nested Releases MAY set dirty
}
sealed trait CacheState
{
val code = CacheState.index.U
CacheState.index = CacheState.index + 1
}
object CacheState
{
var index = 0
}
case object S_INVALID extends CacheState
case object S_BRANCH extends CacheState
case object S_BRANCH_C extends CacheState
case object S_TIP extends CacheState
case object S_TIP_C extends CacheState
case object S_TIP_CD extends CacheState
case object S_TIP_D extends CacheState
case object S_TRUNK_C extends CacheState
case object S_TRUNK_CD extends CacheState
class MSHR(params: InclusiveCacheParameters) extends Module
{
val io = IO(new Bundle {
val allocate = Flipped(Valid(new AllocateRequest(params))) // refills MSHR for next cycle
val directory = Flipped(Valid(new DirectoryResult(params))) // triggers schedule setup
val status = Valid(new MSHRStatus(params))
val schedule = Decoupled(new ScheduleRequest(params))
val sinkc = Flipped(Valid(new SinkCResponse(params)))
val sinkd = Flipped(Valid(new SinkDResponse(params)))
val sinke = Flipped(Valid(new SinkEResponse(params)))
val nestedwb = Flipped(new NestedWriteback(params))
})
val request_valid = RegInit(false.B)
val request = Reg(new FullRequest(params))
val meta_valid = RegInit(false.B)
val meta = Reg(new DirectoryResult(params))
// Define which states are valid
when (meta_valid) {
when (meta.state === INVALID) {
assert (!meta.clients.orR)
assert (!meta.dirty)
}
when (meta.state === BRANCH) {
assert (!meta.dirty)
}
when (meta.state === TRUNK) {
assert (meta.clients.orR)
assert ((meta.clients & (meta.clients - 1.U)) === 0.U) // at most one
}
when (meta.state === TIP) {
// noop
}
}
// Completed transitions (s_ = scheduled), (w_ = waiting)
val s_rprobe = RegInit(true.B) // B
val w_rprobeackfirst = RegInit(true.B)
val w_rprobeacklast = RegInit(true.B)
val s_release = RegInit(true.B) // CW w_rprobeackfirst
val w_releaseack = RegInit(true.B)
val s_pprobe = RegInit(true.B) // B
val s_acquire = RegInit(true.B) // A s_release, s_pprobe [1]
val s_flush = RegInit(true.B) // X w_releaseack
val w_grantfirst = RegInit(true.B)
val w_grantlast = RegInit(true.B)
val w_grant = RegInit(true.B) // first | last depending on wormhole
val w_pprobeackfirst = RegInit(true.B)
val w_pprobeacklast = RegInit(true.B)
val w_pprobeack = RegInit(true.B) // first | last depending on wormhole
val s_probeack = RegInit(true.B) // C w_pprobeackfirst (mutually exclusive with next two s_*)
val s_grantack = RegInit(true.B) // E w_grantfirst ... CAN require both outE&inD to service outD
val s_execute = RegInit(true.B) // D w_pprobeack, w_grant
val w_grantack = RegInit(true.B)
val s_writeback = RegInit(true.B) // W w_*
// [1]: We cannot issue outer Acquire while holding blockB (=> outA can stall)
// However, inB and outC are higher priority than outB, so s_release and s_pprobe
// may be safely issued while blockB. Thus we must NOT try to schedule the
// potentially stuck s_acquire with either of them (scheduler is all or none).
// Meta-data that we discover underway
val sink = Reg(UInt(params.outer.bundle.sinkBits.W))
val gotT = Reg(Bool())
val bad_grant = Reg(Bool())
val probes_done = Reg(UInt(params.clientBits.W))
val probes_toN = Reg(UInt(params.clientBits.W))
val probes_noT = Reg(Bool())
// When a nested transaction completes, update our meta data
when (meta_valid && meta.state =/= INVALID &&
io.nestedwb.set === request.set && io.nestedwb.tag === meta.tag) {
when (io.nestedwb.b_clr_dirty) { meta.dirty := false.B }
when (io.nestedwb.c_set_dirty) { meta.dirty := true.B }
when (io.nestedwb.b_toB) { meta.state := BRANCH }
when (io.nestedwb.b_toN) { meta.hit := false.B }
}
// Scheduler status
io.status.valid := request_valid
io.status.bits.set := request.set
io.status.bits.tag := request.tag
io.status.bits.way := meta.way
io.status.bits.blockB := !meta_valid || ((!w_releaseack || !w_rprobeacklast || !w_pprobeacklast) && !w_grantfirst)
io.status.bits.nestB := meta_valid && w_releaseack && w_rprobeacklast && w_pprobeacklast && !w_grantfirst
// The above rules ensure we will block and not nest an outer probe while still doing our
// own inner probes. Thus every probe wakes exactly one MSHR.
io.status.bits.blockC := !meta_valid
io.status.bits.nestC := meta_valid && (!w_rprobeackfirst || !w_pprobeackfirst || !w_grantfirst)
// The w_grantfirst in nestC is necessary to deal with:
// acquire waiting for grant, inner release gets queued, outer probe -> inner probe -> deadlock
// ... this is possible because the release+probe can be for same set, but different tag
// We can only demand: block, nest, or queue
assert (!io.status.bits.nestB || !io.status.bits.blockB)
assert (!io.status.bits.nestC || !io.status.bits.blockC)
// Scheduler requests
val no_wait = w_rprobeacklast && w_releaseack && w_grantlast && w_pprobeacklast && w_grantack
io.schedule.bits.a.valid := !s_acquire && s_release && s_pprobe
io.schedule.bits.b.valid := !s_rprobe || !s_pprobe
io.schedule.bits.c.valid := (!s_release && w_rprobeackfirst) || (!s_probeack && w_pprobeackfirst)
io.schedule.bits.d.valid := !s_execute && w_pprobeack && w_grant
io.schedule.bits.e.valid := !s_grantack && w_grantfirst
io.schedule.bits.x.valid := !s_flush && w_releaseack
io.schedule.bits.dir.valid := (!s_release && w_rprobeackfirst) || (!s_writeback && no_wait)
io.schedule.bits.reload := no_wait
io.schedule.valid := io.schedule.bits.a.valid || io.schedule.bits.b.valid || io.schedule.bits.c.valid ||
io.schedule.bits.d.valid || io.schedule.bits.e.valid || io.schedule.bits.x.valid ||
io.schedule.bits.dir.valid
// Schedule completions
when (io.schedule.ready) {
s_rprobe := true.B
when (w_rprobeackfirst) { s_release := true.B }
s_pprobe := true.B
when (s_release && s_pprobe) { s_acquire := true.B }
when (w_releaseack) { s_flush := true.B }
when (w_pprobeackfirst) { s_probeack := true.B }
when (w_grantfirst) { s_grantack := true.B }
when (w_pprobeack && w_grant) { s_execute := true.B }
when (no_wait) { s_writeback := true.B }
// Await the next operation
when (no_wait) {
request_valid := false.B
meta_valid := false.B
}
}
// Resulting meta-data
val final_meta_writeback = WireInit(meta)
val req_clientBit = params.clientBit(request.source)
val req_needT = needT(request.opcode, request.param)
val req_acquire = request.opcode === AcquireBlock || request.opcode === AcquirePerm
val meta_no_clients = !meta.clients.orR
val req_promoteT = req_acquire && Mux(meta.hit, meta_no_clients && meta.state === TIP, gotT)
when (request.prio(2) && (!params.firstLevel).B) { // always a hit
final_meta_writeback.dirty := meta.dirty || request.opcode(0)
final_meta_writeback.state := Mux(request.param =/= TtoT && meta.state === TRUNK, TIP, meta.state)
final_meta_writeback.clients := meta.clients & ~Mux(isToN(request.param), req_clientBit, 0.U)
final_meta_writeback.hit := true.B // chained requests are hits
} .elsewhen (request.control && params.control.B) { // request.prio(0)
when (meta.hit) {
final_meta_writeback.dirty := false.B
final_meta_writeback.state := INVALID
final_meta_writeback.clients := meta.clients & ~probes_toN
}
final_meta_writeback.hit := false.B
} .otherwise {
final_meta_writeback.dirty := (meta.hit && meta.dirty) || !request.opcode(2)
final_meta_writeback.state := Mux(req_needT,
Mux(req_acquire, TRUNK, TIP),
Mux(!meta.hit, Mux(gotT, Mux(req_acquire, TRUNK, TIP), BRANCH),
MuxLookup(meta.state, 0.U(2.W))(Seq(
INVALID -> BRANCH,
BRANCH -> BRANCH,
TRUNK -> TIP,
TIP -> Mux(meta_no_clients && req_acquire, TRUNK, TIP)))))
final_meta_writeback.clients := Mux(meta.hit, meta.clients & ~probes_toN, 0.U) |
Mux(req_acquire, req_clientBit, 0.U)
final_meta_writeback.tag := request.tag
final_meta_writeback.hit := true.B
}
when (bad_grant) {
when (meta.hit) {
// upgrade failed (B -> T)
assert (!meta_valid || meta.state === BRANCH)
final_meta_writeback.hit := true.B
final_meta_writeback.dirty := false.B
final_meta_writeback.state := BRANCH
final_meta_writeback.clients := meta.clients & ~probes_toN
} .otherwise {
// failed N -> (T or B)
final_meta_writeback.hit := false.B
final_meta_writeback.dirty := false.B
final_meta_writeback.state := INVALID
final_meta_writeback.clients := 0.U
}
}
val invalid = Wire(new DirectoryEntry(params))
invalid.dirty := false.B
invalid.state := INVALID
invalid.clients := 0.U
invalid.tag := 0.U
// Just because a client says BtoT, by the time we process the request it may already be N.
// Therefore, we must consult our own meta-data state to confirm the client still owns the line.
val honour_BtoT = meta.hit && (meta.clients & req_clientBit).orR
// The client asking us to act is proof they don't have permissions.
val excluded_client = Mux(meta.hit && request.prio(0) && skipProbeN(request.opcode, params.cache.hintsSkipProbe), req_clientBit, 0.U)
io.schedule.bits.a.bits.tag := request.tag
io.schedule.bits.a.bits.set := request.set
io.schedule.bits.a.bits.param := Mux(req_needT, Mux(meta.hit, BtoT, NtoT), NtoB)
io.schedule.bits.a.bits.block := request.size =/= log2Ceil(params.cache.blockBytes).U ||
!(request.opcode === PutFullData || request.opcode === AcquirePerm)
io.schedule.bits.a.bits.source := 0.U
io.schedule.bits.b.bits.param := Mux(!s_rprobe, toN, Mux(request.prio(1), request.param, Mux(req_needT, toN, toB)))
io.schedule.bits.b.bits.tag := Mux(!s_rprobe, meta.tag, request.tag)
io.schedule.bits.b.bits.set := request.set
io.schedule.bits.b.bits.clients := meta.clients & ~excluded_client
io.schedule.bits.c.bits.opcode := Mux(meta.dirty, ReleaseData, Release)
io.schedule.bits.c.bits.param := Mux(meta.state === BRANCH, BtoN, TtoN)
io.schedule.bits.c.bits.source := 0.U
io.schedule.bits.c.bits.tag := meta.tag
io.schedule.bits.c.bits.set := request.set
io.schedule.bits.c.bits.way := meta.way
io.schedule.bits.c.bits.dirty := meta.dirty
io.schedule.bits.d.bits.viewAsSupertype(chiselTypeOf(request)) := request
io.schedule.bits.d.bits.param := Mux(!req_acquire, request.param,
MuxLookup(request.param, request.param)(Seq(
NtoB -> Mux(req_promoteT, NtoT, NtoB),
BtoT -> Mux(honour_BtoT, BtoT, NtoT),
NtoT -> NtoT)))
io.schedule.bits.d.bits.sink := 0.U
io.schedule.bits.d.bits.way := meta.way
io.schedule.bits.d.bits.bad := bad_grant
io.schedule.bits.e.bits.sink := sink
io.schedule.bits.x.bits.fail := false.B
io.schedule.bits.dir.bits.set := request.set
io.schedule.bits.dir.bits.way := meta.way
io.schedule.bits.dir.bits.data := Mux(!s_release, invalid, WireInit(new DirectoryEntry(params), init = final_meta_writeback))
// Coverage of state transitions
def cacheState(entry: DirectoryEntry, hit: Bool) = {
val out = WireDefault(0.U)
val c = entry.clients.orR
val d = entry.dirty
switch (entry.state) {
is (BRANCH) { out := Mux(c, S_BRANCH_C.code, S_BRANCH.code) }
is (TRUNK) { out := Mux(d, S_TRUNK_CD.code, S_TRUNK_C.code) }
is (TIP) { out := Mux(c, Mux(d, S_TIP_CD.code, S_TIP_C.code), Mux(d, S_TIP_D.code, S_TIP.code)) }
is (INVALID) { out := S_INVALID.code }
}
when (!hit) { out := S_INVALID.code }
out
}
val p = !params.lastLevel // can be probed
val c = !params.firstLevel // can be acquired
val m = params.inner.client.clients.exists(!_.supports.probe) // can be written (or read)
val r = params.outer.manager.managers.exists(!_.alwaysGrantsT) // read-only devices exist
val f = params.control // flush control register exists
val cfg = (p, c, m, r, f)
val b = r || p // can reach branch state (via probe downgrade or read-only device)
// The cache must be used for something or we would not be here
require(c || m)
val evict = cacheState(meta, !meta.hit)
val before = cacheState(meta, meta.hit)
val after = cacheState(final_meta_writeback, true.B)
def eviction(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo): Unit = {
if (cover) {
params.ccover(evict === from.code, s"MSHR_${from}_EVICT", s"State transition from ${from} to evicted ${cfg}")
} else {
assert(!(evict === from.code), cf"State transition from ${from} to evicted should be impossible ${cfg}")
}
if (cover && f) {
params.ccover(before === from.code, s"MSHR_${from}_FLUSH", s"State transition from ${from} to flushed ${cfg}")
} else {
assert(!(before === from.code), cf"State transition from ${from} to flushed should be impossible ${cfg}")
}
}
def transition(from: CacheState, to: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo): Unit = {
if (cover) {
params.ccover(before === from.code && after === to.code, s"MSHR_${from}_${to}", s"State transition from ${from} to ${to} ${cfg}")
} else {
assert(!(before === from.code && after === to.code), cf"State transition from ${from} to ${to} should be impossible ${cfg}")
}
}
when ((!s_release && w_rprobeackfirst) && io.schedule.ready) {
eviction(S_BRANCH, b) // MMIO read to read-only device
eviction(S_BRANCH_C, b && c) // you need children to become C
eviction(S_TIP, true) // MMIO read || clean release can lead to this state
eviction(S_TIP_C, c) // needs two clients || client + mmio || downgrading client
eviction(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client
eviction(S_TIP_D, true) // MMIO write || dirty release lead here
eviction(S_TRUNK_C, c) // acquire for write
eviction(S_TRUNK_CD, c) // dirty release then reacquire
}
when ((!s_writeback && no_wait) && io.schedule.ready) {
transition(S_INVALID, S_BRANCH, b && m) // only MMIO can bring us to BRANCH state
transition(S_INVALID, S_BRANCH_C, b && c) // C state is only possible if there are inner caches
transition(S_INVALID, S_TIP, m) // MMIO read
transition(S_INVALID, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_INVALID, S_TIP_CD, false) // acquire does not cause dirty immediately
transition(S_INVALID, S_TIP_D, m) // MMIO write
transition(S_INVALID, S_TRUNK_C, c) // acquire
transition(S_INVALID, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH, S_INVALID, b && p) // probe can do this (flushes run as evictions)
transition(S_BRANCH, S_BRANCH_C, b && c) // acquire
transition(S_BRANCH, S_TIP, b && m) // prefetch write
transition(S_BRANCH, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_BRANCH, S_TIP_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH, S_TIP_D, b && m) // MMIO write
transition(S_BRANCH, S_TRUNK_C, b && c) // acquire
transition(S_BRANCH, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH_C, S_INVALID, b && c && p)
transition(S_BRANCH_C, S_BRANCH, b && c) // clean release (optional)
transition(S_BRANCH_C, S_TIP, b && c && m) // prefetch write
transition(S_BRANCH_C, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_BRANCH_C, S_TIP_D, b && c && m) // MMIO write
transition(S_BRANCH_C, S_TIP_CD, false) // going dirty means we must shoot down clients
transition(S_BRANCH_C, S_TRUNK_C, b && c) // acquire
transition(S_BRANCH_C, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_TIP, S_INVALID, p)
transition(S_TIP, S_BRANCH, p) // losing TIP only possible via probe
transition(S_TIP, S_BRANCH_C, false) // we would go S_TRUNK_C instead
transition(S_TIP, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP, S_TIP_D, m) // direct dirty only via MMIO write
transition(S_TIP, S_TIP_CD, false) // acquire does not make us dirty immediately
transition(S_TIP, S_TRUNK_C, c) // acquire
transition(S_TIP, S_TRUNK_CD, false) // acquire does not make us dirty immediately
transition(S_TIP_C, S_INVALID, c && p)
transition(S_TIP_C, S_BRANCH, c && p) // losing TIP only possible via probe
transition(S_TIP_C, S_BRANCH_C, c && p) // losing TIP only possible via probe
transition(S_TIP_C, S_TIP, c) // probed while MMIO read || clean release (optional)
transition(S_TIP_C, S_TIP_D, c && m) // direct dirty only via MMIO write
transition(S_TIP_C, S_TIP_CD, false) // going dirty means we must shoot down clients
transition(S_TIP_C, S_TRUNK_C, c) // acquire
transition(S_TIP_C, S_TRUNK_CD, false) // acquire does not make us immediately dirty
transition(S_TIP_D, S_INVALID, p)
transition(S_TIP_D, S_BRANCH, p) // losing D is only possible via probe
transition(S_TIP_D, S_BRANCH_C, p && c) // probed while acquire shared
transition(S_TIP_D, S_TIP, p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TIP_D, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP_D, S_TIP_CD, false) // we would go S_TRUNK_CD instead
transition(S_TIP_D, S_TRUNK_C, p && c) // probed while acquired
transition(S_TIP_D, S_TRUNK_CD, c) // acquire
transition(S_TIP_CD, S_INVALID, c && p)
transition(S_TIP_CD, S_BRANCH, c && p) // losing D is only possible via probe
transition(S_TIP_CD, S_BRANCH_C, c && p) // losing D is only possible via probe
transition(S_TIP_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TIP_CD, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP_CD, S_TIP_D, c) // MMIO write || clean release (optional)
transition(S_TIP_CD, S_TRUNK_C, c && p) // probed while acquire
transition(S_TIP_CD, S_TRUNK_CD, c) // acquire
transition(S_TRUNK_C, S_INVALID, c && p)
transition(S_TRUNK_C, S_BRANCH, c && p) // losing TIP only possible via probe
transition(S_TRUNK_C, S_BRANCH_C, c && p) // losing TIP only possible via probe
transition(S_TRUNK_C, S_TIP, c) // MMIO read || clean release (optional)
transition(S_TRUNK_C, S_TIP_C, c) // bounce shared
transition(S_TRUNK_C, S_TIP_D, c) // dirty release
transition(S_TRUNK_C, S_TIP_CD, c) // dirty bounce shared
transition(S_TRUNK_C, S_TRUNK_CD, c) // dirty bounce
transition(S_TRUNK_CD, S_INVALID, c && p)
transition(S_TRUNK_CD, S_BRANCH, c && p) // losing D only possible via probe
transition(S_TRUNK_CD, S_BRANCH_C, c && p) // losing D only possible via probe
transition(S_TRUNK_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TRUNK_CD, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TRUNK_CD, S_TIP_D, c) // dirty release
transition(S_TRUNK_CD, S_TIP_CD, c) // bounce shared
transition(S_TRUNK_CD, S_TRUNK_C, c && p) // probed while acquire
}
// Handle response messages
val probe_bit = params.clientBit(io.sinkc.bits.source)
val last_probe = (probes_done | probe_bit) === (meta.clients & ~excluded_client)
val probe_toN = isToN(io.sinkc.bits.param)
if (!params.firstLevel) when (io.sinkc.valid) {
params.ccover( probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_FULL", "Client downgraded to N when asked only to do B")
params.ccover(!probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_HALF", "Client downgraded to B when asked only to do B")
// Caution: the probe matches us only in set.
// We would never allow an outer probe to nest until both w_[rp]probeack complete, so
// it is safe to just unguardedly update the probe FSM.
probes_done := probes_done | probe_bit
probes_toN := probes_toN | Mux(probe_toN, probe_bit, 0.U)
probes_noT := probes_noT || io.sinkc.bits.param =/= TtoT
w_rprobeackfirst := w_rprobeackfirst || last_probe
w_rprobeacklast := w_rprobeacklast || (last_probe && io.sinkc.bits.last)
w_pprobeackfirst := w_pprobeackfirst || last_probe
w_pprobeacklast := w_pprobeacklast || (last_probe && io.sinkc.bits.last)
// Allow wormhole routing from sinkC if the first request beat has offset 0
val set_pprobeack = last_probe && (io.sinkc.bits.last || request.offset === 0.U)
w_pprobeack := w_pprobeack || set_pprobeack
params.ccover(!set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_SERIAL", "Sequential routing of probe response data")
params.ccover( set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_WORMHOLE", "Wormhole routing of probe response data")
// However, meta-data updates need to be done more cautiously
when (meta.state =/= INVALID && io.sinkc.bits.tag === meta.tag && io.sinkc.bits.data) { meta.dirty := true.B } // !!!
}
when (io.sinkd.valid) {
when (io.sinkd.bits.opcode === Grant || io.sinkd.bits.opcode === GrantData) {
sink := io.sinkd.bits.sink
w_grantfirst := true.B
w_grantlast := io.sinkd.bits.last
// Record if we need to prevent taking ownership
bad_grant := io.sinkd.bits.denied
// Allow wormhole routing for requests whose first beat has offset 0
w_grant := request.offset === 0.U || io.sinkd.bits.last
params.ccover(io.sinkd.bits.opcode === GrantData && request.offset === 0.U, "MSHR_GRANT_WORMHOLE", "Wormhole routing of grant response data")
params.ccover(io.sinkd.bits.opcode === GrantData && request.offset =/= 0.U, "MSHR_GRANT_SERIAL", "Sequential routing of grant response data")
gotT := io.sinkd.bits.param === toT
}
.elsewhen (io.sinkd.bits.opcode === ReleaseAck) {
w_releaseack := true.B
}
}
when (io.sinke.valid) {
w_grantack := true.B
}
// Bootstrap new requests
val allocate_as_full = WireInit(new FullRequest(params), init = io.allocate.bits)
val new_meta = Mux(io.allocate.valid && io.allocate.bits.repeat, final_meta_writeback, io.directory.bits)
val new_request = Mux(io.allocate.valid, allocate_as_full, request)
val new_needT = needT(new_request.opcode, new_request.param)
val new_clientBit = params.clientBit(new_request.source)
val new_skipProbe = Mux(skipProbeN(new_request.opcode, params.cache.hintsSkipProbe), new_clientBit, 0.U)
val prior = cacheState(final_meta_writeback, true.B)
def bypass(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo): Unit = {
if (cover) {
params.ccover(prior === from.code, s"MSHR_${from}_BYPASS", s"State bypass transition from ${from} ${cfg}")
} else {
assert(!(prior === from.code), cf"State bypass from ${from} should be impossible ${cfg}")
}
}
when (io.allocate.valid && io.allocate.bits.repeat) {
bypass(S_INVALID, f || p) // Can lose permissions (probe/flush)
bypass(S_BRANCH, b) // MMIO read to read-only device
bypass(S_BRANCH_C, b && c) // you need children to become C
bypass(S_TIP, true) // MMIO read || clean release can lead to this state
bypass(S_TIP_C, c) // needs two clients || client + mmio || downgrading client
bypass(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client
bypass(S_TIP_D, true) // MMIO write || dirty release lead here
bypass(S_TRUNK_C, c) // acquire for write
bypass(S_TRUNK_CD, c) // dirty release then reacquire
}
when (io.allocate.valid) {
assert (!request_valid || (no_wait && io.schedule.fire))
request_valid := true.B
request := io.allocate.bits
}
// Create execution plan
when (io.directory.valid || (io.allocate.valid && io.allocate.bits.repeat)) {
meta_valid := true.B
meta := new_meta
probes_done := 0.U
probes_toN := 0.U
probes_noT := false.B
gotT := false.B
bad_grant := false.B
// These should already be either true or turning true
// We clear them here explicitly to simplify the mux tree
s_rprobe := true.B
w_rprobeackfirst := true.B
w_rprobeacklast := true.B
s_release := true.B
w_releaseack := true.B
s_pprobe := true.B
s_acquire := true.B
s_flush := true.B
w_grantfirst := true.B
w_grantlast := true.B
w_grant := true.B
w_pprobeackfirst := true.B
w_pprobeacklast := true.B
w_pprobeack := true.B
s_probeack := true.B
s_grantack := true.B
s_execute := true.B
w_grantack := true.B
s_writeback := true.B
// For C channel requests (ie: Release[Data])
when (new_request.prio(2) && (!params.firstLevel).B) {
s_execute := false.B
// Do we need to go dirty?
when (new_request.opcode(0) && !new_meta.dirty) {
s_writeback := false.B
}
// Does our state change?
when (isToB(new_request.param) && new_meta.state === TRUNK) {
s_writeback := false.B
}
// Do our clients change?
when (isToN(new_request.param) && (new_meta.clients & new_clientBit) =/= 0.U) {
s_writeback := false.B
}
assert (new_meta.hit)
}
// For X channel requests (ie: flush)
.elsewhen (new_request.control && params.control.B) { // new_request.prio(0)
s_flush := false.B
// Do we need to actually do something?
when (new_meta.hit) {
s_release := false.B
w_releaseack := false.B
// Do we need to shoot-down inner caches?
when ((!params.firstLevel).B && (new_meta.clients =/= 0.U)) {
s_rprobe := false.B
w_rprobeackfirst := false.B
w_rprobeacklast := false.B
}
}
}
// For A channel requests
.otherwise { // new_request.prio(0) && !new_request.control
s_execute := false.B
// Do we need an eviction?
when (!new_meta.hit && new_meta.state =/= INVALID) {
s_release := false.B
w_releaseack := false.B
// Do we need to shoot-down inner caches?
when ((!params.firstLevel).B & (new_meta.clients =/= 0.U)) {
s_rprobe := false.B
w_rprobeackfirst := false.B
w_rprobeacklast := false.B
}
}
// Do we need an acquire?
when (!new_meta.hit || (new_meta.state === BRANCH && new_needT)) {
s_acquire := false.B
w_grantfirst := false.B
w_grantlast := false.B
w_grant := false.B
s_grantack := false.B
s_writeback := false.B
}
// Do we need a probe?
when ((!params.firstLevel).B && (new_meta.hit &&
(new_needT || new_meta.state === TRUNK) &&
(new_meta.clients & ~new_skipProbe) =/= 0.U)) {
s_pprobe := false.B
w_pprobeackfirst := false.B
w_pprobeacklast := false.B
w_pprobeack := false.B
s_writeback := false.B
}
// Do we need a grantack?
when (new_request.opcode === AcquireBlock || new_request.opcode === AcquirePerm) {
w_grantack := false.B
s_writeback := false.B
}
// Becomes dirty?
when (!new_request.opcode(2) && new_meta.hit && !new_meta.dirty) {
s_writeback := false.B
}
}
}
}
File Parameters.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
import freechips.rocketchip.util.property.cover
import scala.math.{min,max}
case class CacheParameters(
level: Int,
ways: Int,
sets: Int,
blockBytes: Int,
beatBytes: Int, // inner
hintsSkipProbe: Boolean)
{
require (ways > 0)
require (sets > 0)
require (blockBytes > 0 && isPow2(blockBytes))
require (beatBytes > 0 && isPow2(beatBytes))
require (blockBytes >= beatBytes)
val blocks = ways * sets
val sizeBytes = blocks * blockBytes
val blockBeats = blockBytes/beatBytes
}
case class InclusiveCachePortParameters(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)
{
def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new TLBuffer(a, b, c, d, e))
}
object InclusiveCachePortParameters
{
val none = InclusiveCachePortParameters(
a = BufferParams.none,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.none,
e = BufferParams.none)
val full = InclusiveCachePortParameters(
a = BufferParams.default,
b = BufferParams.default,
c = BufferParams.default,
d = BufferParams.default,
e = BufferParams.default)
// This removes feed-through paths from C=>A and A=>C
val fullC = InclusiveCachePortParameters(
a = BufferParams.none,
b = BufferParams.none,
c = BufferParams.default,
d = BufferParams.none,
e = BufferParams.none)
val flowAD = InclusiveCachePortParameters(
a = BufferParams.flow,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.flow,
e = BufferParams.none)
val flowAE = InclusiveCachePortParameters(
a = BufferParams.flow,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.none,
e = BufferParams.flow)
// For innerBuf:
// SinkA: no restrictions, flows into scheduler+putbuffer
// SourceB: no restrictions, flows out of scheduler
// sinkC: no restrictions, flows into scheduler+putbuffer & buffered to bankedStore
// SourceD: no restrictions, flows out of bankedStore/regout
// SinkE: no restrictions, flows into scheduler
//
// ... so while none is possible, you probably want at least flowAC to cut ready
// from the scheduler delay and flowD to ease SourceD back-pressure
// For outerBuf:
// SourceA: must not be pipe, flows out of scheduler
// SinkB: no restrictions, flows into scheduler
// SourceC: pipe is useless, flows out of bankedStore/regout, parameter depth ignored
// SinkD: no restrictions, flows into scheduler & bankedStore
// SourceE: must not be pipe, flows out of scheduler
//
// ... AE take the channel ready into the scheduler, so you need at least flowAE
}
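// Illustrative only (not part of the original file): one way to follow the
// buffering guidance above, keeping the default fullC inner buffer and using
// the minimal legal flowAE outer buffer. writeBytes = 8 is an assumed value.
object ExampleInclusiveCacheMicroParameters {
  val lowLatency = InclusiveCacheMicroParameters(
    writeBytes = 8,
    outerBuf = InclusiveCachePortParameters.flowAE)
}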
case class InclusiveCacheMicroParameters(
writeBytes: Int, // backing store update granularity
memCycles: Int = 40, // # of L2 clock cycles for a memory round-trip (50ns @ 800MHz)
portFactor: Int = 4, // numSubBanks = (widest TL port * portFactor) / writeBytes
dirReg: Boolean = false,
innerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.fullC, // or none
outerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.full) // or flowAE
{
require (writeBytes > 0 && isPow2(writeBytes))
require (memCycles > 0)
require (portFactor >= 2) // for inner RMW and concurrent outer Release + Grant
}
case class InclusiveCacheControlParameters(
address: BigInt,
beatBytes: Int,
bankedControl: Boolean)
case class InclusiveCacheParameters(
cache: CacheParameters,
micro: InclusiveCacheMicroParameters,
control: Boolean,
inner: TLEdgeIn,
outer: TLEdgeOut)(implicit val p: Parameters)
{
require (cache.ways > 1)
require (cache.sets > 1 && isPow2(cache.sets))
require (micro.writeBytes <= inner.manager.beatBytes)
require (micro.writeBytes <= outer.manager.beatBytes)
require (inner.manager.beatBytes <= cache.blockBytes)
require (outer.manager.beatBytes <= cache.blockBytes)
// Require that all cached address ranges have contiguous blocks
outer.manager.managers.flatMap(_.address).foreach { a =>
require (a.alignment >= cache.blockBytes)
}
// If we are the first level cache, we do not need to support inner-BCE
val firstLevel = !inner.client.clients.exists(_.supports.probe)
// If we are the last level cache, we do not need to support outer-B
val lastLevel = !outer.manager.managers.exists(_.regionType > RegionType.UNCACHED)
require (lastLevel)
// Provision enough resources to achieve full throughput with missing single-beat accesses
val mshrs = InclusiveCacheParameters.all_mshrs(cache, micro)
val secondary = max(mshrs, micro.memCycles - mshrs)
val putLists = micro.memCycles // allow every request to be single beat
val putBeats = max(2*cache.blockBeats, micro.memCycles)
val relLists = 2
val relBeats = relLists*cache.blockBeats
val flatAddresses = AddressSet.unify(outer.manager.managers.flatMap(_.address))
val pickMask = AddressDecoder(flatAddresses.map(Seq(_)), flatAddresses.map(_.mask).reduce(_|_))
def bitOffsets(x: BigInt, offset: Int = 0, tail: List[Int] = List.empty[Int]): List[Int] =
if (x == 0) tail.reverse else bitOffsets(x >> 1, offset + 1, if ((x & 1) == 1) offset :: tail else tail)
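// Worked example (illustrative): bitOffsets(0x2C) walks 0b101100 from the LSB
// upward and returns List(2, 3, 5), i.e. the positions of the set bits, which
// is how pickMask below is turned into the list of selected address bits.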
val addressMapping = bitOffsets(pickMask)
val addressBits = addressMapping.size
// println(s"addresses: ${flatAddresses} => ${pickMask} => ${addressBits}")
val allClients = inner.client.clients.size
val clientBitsRaw = inner.client.clients.filter(_.supports.probe).size
val clientBits = max(1, clientBitsRaw)
val stateBits = 2
val wayBits = log2Ceil(cache.ways)
val setBits = log2Ceil(cache.sets)
val offsetBits = log2Ceil(cache.blockBytes)
val tagBits = addressBits - setBits - offsetBits
val putBits = log2Ceil(max(putLists, relLists))
require (tagBits > 0)
require (offsetBits > 0)
val innerBeatBits = (offsetBits - log2Ceil(inner.manager.beatBytes)) max 1
val outerBeatBits = (offsetBits - log2Ceil(outer.manager.beatBytes)) max 1
val innerMaskBits = inner.manager.beatBytes / micro.writeBytes
val outerMaskBits = outer.manager.beatBytes / micro.writeBytes
def clientBit(source: UInt): UInt = {
if (clientBitsRaw == 0) {
0.U
} else {
Cat(inner.client.clients.filter(_.supports.probe).map(_.sourceId.contains(source)).reverse)
}
}
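// Example (illustrative, with assumed sourceId ranges): if two probe-capable
// clients own sourceIds [0,32) and [32,64), then clientBit(35.U) yields "b10".U,
// a one-hot vector whose bit 0 corresponds to the first probeable client.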
def clientSource(bit: UInt): UInt = {
if (clientBitsRaw == 0) {
0.U
} else {
Mux1H(bit, inner.client.clients.filter(_.supports.probe).map(c => c.sourceId.start.U))
}
}
def parseAddress(x: UInt): (UInt, UInt, UInt) = {
val offset = Cat(addressMapping.map(o => x(o,o)).reverse)
val set = offset >> offsetBits
val tag = set >> setBits
(tag(tagBits-1, 0), set(setBits-1, 0), offset(offsetBits-1, 0))
}
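// Example (illustrative): with offsetBits = 6 and setBits = 11, parseAddress
// first compresses the address down to the bits selected by addressMapping,
// then slices offset = bits [5:0], set = bits [16:6], and tag = the remainder.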
def widen(x: UInt, width: Int): UInt = {
val y = x | 0.U(width.W)
assert (y >> width === 0.U)
y(width-1, 0)
}
def expandAddress(tag: UInt, set: UInt, offset: UInt): UInt = {
val base = Cat(widen(tag, tagBits), widen(set, setBits), widen(offset, offsetBits))
val bits = Array.fill(outer.bundle.addressBits) { 0.U(1.W) }
addressMapping.zipWithIndex.foreach { case (a, i) => bits(a) = base(i,i) }
Cat(bits.reverse)
}
def restoreAddress(expanded: UInt): UInt = {
val missingBits = flatAddresses
.map { a => (a.widen(pickMask).base, a.widen(~pickMask)) } // key is the bits to restore on match
.groupBy(_._1)
.view
.mapValues(_.map(_._2))
val muxMask = AddressDecoder(missingBits.values.toList)
val mux = missingBits.toList.map { case (bits, addrs) =>
val widen = addrs.map(_.widen(~muxMask))
val matches = AddressSet
.unify(widen.distinct)
.map(_.contains(expanded))
.reduce(_ || _)
(matches, bits.U)
}
expanded | Mux1H(mux)
}
def dirReg[T <: Data](x: T, en: Bool = true.B): T = {
if (micro.dirReg) RegEnable(x, en) else x
}
def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
cover(cond, "CCACHE_L" + cache.level + "_" + label, "MemorySystem;;" + desc)
}
object MetaData
{
val stateBits = 2
def INVALID: UInt = 0.U(stateBits.W) // way is empty
def BRANCH: UInt = 1.U(stateBits.W) // outer slave cache is trunk
def TRUNK: UInt = 2.U(stateBits.W) // unique inner master cache is trunk
def TIP: UInt = 3.U(stateBits.W) // we are trunk, inner masters are branch
// Does a request need trunk?
def needT(opcode: UInt, param: UInt): Bool = {
!opcode(2) ||
(opcode === TLMessages.Hint && param === TLHints.PREFETCH_WRITE) ||
((opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm) && param =/= TLPermissions.NtoB)
}
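// Worked examples (illustrative, using the standard TileLink A-channel opcode
// encodings): PutFullData (0) has opcode(2) == 0, so needT is true; Get (4) is
// neither a write, a prefetch-write Hint, nor an Acquire, so needT is false;
// AcquireBlock (6) with param NtoB only asks for Branch, so needT is false.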
// Does a request prove the client need not be probed?
def skipProbeN(opcode: UInt, hintsSkipProbe: Boolean): Bool = {
// Acquire(toB) and Get => is N, so no probe
// Acquire(*toT) => is N or B, but need T, so no probe
// Hint => could be anything, so a probe IS needed; if hintsSkipProbe is enabled, skip probing the requesting client
// Put* => is N or B, so probe IS needed
opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm || opcode === TLMessages.Get || (opcode === TLMessages.Hint && hintsSkipProbe.B)
}
def isToN(param: UInt): Bool = {
param === TLPermissions.TtoN || param === TLPermissions.BtoN || param === TLPermissions.NtoN
}
def isToB(param: UInt): Bool = {
param === TLPermissions.TtoB || param === TLPermissions.BtoB
}
}
object InclusiveCacheParameters
{
val lfsrBits = 10
val L2ControlAddress = 0x2010000
val L2ControlSize = 0x1000
def out_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = {
// We need 2-3 normal MSHRs to cover the Directory latency
// To fully exploit memory bandwidth-delay-product, we need memCycles/blockBeats MSHRs
max(if (micro.dirReg) 3 else 2, (micro.memCycles + cache.blockBeats - 1) / cache.blockBeats)
}
def all_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int =
// We need a dedicated MSHR for B+C each
2 + out_mshrs(cache, micro)
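// Illustrative sizing (hypothetical parameters): with memCycles = 40,
// dirReg = false, blockBytes = 64 and beatBytes = 8 (so blockBeats = 8),
// out_mshrs = max(2, ceil(40 / 8)) = 5 and all_mshrs = 7.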
}
class InclusiveCacheBundle(params: InclusiveCacheParameters) extends Bundle
| module MSHR_9( // @[MSHR.scala:84:7]
input clock, // @[MSHR.scala:84:7]
input reset, // @[MSHR.scala:84:7]
input io_allocate_valid, // @[MSHR.scala:86:14]
input io_allocate_bits_prio_0, // @[MSHR.scala:86:14]
input io_allocate_bits_prio_1, // @[MSHR.scala:86:14]
input io_allocate_bits_prio_2, // @[MSHR.scala:86:14]
input io_allocate_bits_control, // @[MSHR.scala:86:14]
input [2:0] io_allocate_bits_opcode, // @[MSHR.scala:86:14]
input [2:0] io_allocate_bits_param, // @[MSHR.scala:86:14]
input [2:0] io_allocate_bits_size, // @[MSHR.scala:86:14]
input [5:0] io_allocate_bits_source, // @[MSHR.scala:86:14]
input [8:0] io_allocate_bits_tag, // @[MSHR.scala:86:14]
input [5:0] io_allocate_bits_offset, // @[MSHR.scala:86:14]
input [5:0] io_allocate_bits_put, // @[MSHR.scala:86:14]
input [10:0] io_allocate_bits_set, // @[MSHR.scala:86:14]
input io_allocate_bits_repeat, // @[MSHR.scala:86:14]
input io_directory_valid, // @[MSHR.scala:86:14]
input io_directory_bits_dirty, // @[MSHR.scala:86:14]
input [1:0] io_directory_bits_state, // @[MSHR.scala:86:14]
input io_directory_bits_clients, // @[MSHR.scala:86:14]
input [8:0] io_directory_bits_tag, // @[MSHR.scala:86:14]
input io_directory_bits_hit, // @[MSHR.scala:86:14]
input [3:0] io_directory_bits_way, // @[MSHR.scala:86:14]
output io_status_valid, // @[MSHR.scala:86:14]
output [10:0] io_status_bits_set, // @[MSHR.scala:86:14]
output [8:0] io_status_bits_tag, // @[MSHR.scala:86:14]
output [3:0] io_status_bits_way, // @[MSHR.scala:86:14]
output io_status_bits_blockB, // @[MSHR.scala:86:14]
output io_status_bits_nestB, // @[MSHR.scala:86:14]
output io_status_bits_blockC, // @[MSHR.scala:86:14]
output io_status_bits_nestC, // @[MSHR.scala:86:14]
input io_schedule_ready, // @[MSHR.scala:86:14]
output io_schedule_valid, // @[MSHR.scala:86:14]
output io_schedule_bits_a_valid, // @[MSHR.scala:86:14]
output [8:0] io_schedule_bits_a_bits_tag, // @[MSHR.scala:86:14]
output [10:0] io_schedule_bits_a_bits_set, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_a_bits_param, // @[MSHR.scala:86:14]
output io_schedule_bits_a_bits_block, // @[MSHR.scala:86:14]
output io_schedule_bits_b_valid, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_b_bits_param, // @[MSHR.scala:86:14]
output [8:0] io_schedule_bits_b_bits_tag, // @[MSHR.scala:86:14]
output [10:0] io_schedule_bits_b_bits_set, // @[MSHR.scala:86:14]
output io_schedule_bits_b_bits_clients, // @[MSHR.scala:86:14]
output io_schedule_bits_c_valid, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_c_bits_opcode, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_c_bits_param, // @[MSHR.scala:86:14]
output [8:0] io_schedule_bits_c_bits_tag, // @[MSHR.scala:86:14]
output [10:0] io_schedule_bits_c_bits_set, // @[MSHR.scala:86:14]
output [3:0] io_schedule_bits_c_bits_way, // @[MSHR.scala:86:14]
output io_schedule_bits_c_bits_dirty, // @[MSHR.scala:86:14]
output io_schedule_bits_d_valid, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_prio_0, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_prio_1, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_prio_2, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_control, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_opcode, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_param, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_size, // @[MSHR.scala:86:14]
output [5:0] io_schedule_bits_d_bits_source, // @[MSHR.scala:86:14]
output [8:0] io_schedule_bits_d_bits_tag, // @[MSHR.scala:86:14]
output [5:0] io_schedule_bits_d_bits_offset, // @[MSHR.scala:86:14]
output [5:0] io_schedule_bits_d_bits_put, // @[MSHR.scala:86:14]
output [10:0] io_schedule_bits_d_bits_set, // @[MSHR.scala:86:14]
output [3:0] io_schedule_bits_d_bits_way, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_bad, // @[MSHR.scala:86:14]
output io_schedule_bits_e_valid, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_e_bits_sink, // @[MSHR.scala:86:14]
output io_schedule_bits_x_valid, // @[MSHR.scala:86:14]
output io_schedule_bits_dir_valid, // @[MSHR.scala:86:14]
output [10:0] io_schedule_bits_dir_bits_set, // @[MSHR.scala:86:14]
output [3:0] io_schedule_bits_dir_bits_way, // @[MSHR.scala:86:14]
output io_schedule_bits_dir_bits_data_dirty, // @[MSHR.scala:86:14]
output [1:0] io_schedule_bits_dir_bits_data_state, // @[MSHR.scala:86:14]
output io_schedule_bits_dir_bits_data_clients, // @[MSHR.scala:86:14]
output [8:0] io_schedule_bits_dir_bits_data_tag, // @[MSHR.scala:86:14]
output io_schedule_bits_reload, // @[MSHR.scala:86:14]
input io_sinkc_valid, // @[MSHR.scala:86:14]
input io_sinkc_bits_last, // @[MSHR.scala:86:14]
input [10:0] io_sinkc_bits_set, // @[MSHR.scala:86:14]
input [8:0] io_sinkc_bits_tag, // @[MSHR.scala:86:14]
input [5:0] io_sinkc_bits_source, // @[MSHR.scala:86:14]
input [2:0] io_sinkc_bits_param, // @[MSHR.scala:86:14]
input io_sinkc_bits_data, // @[MSHR.scala:86:14]
input io_sinkd_valid, // @[MSHR.scala:86:14]
input io_sinkd_bits_last, // @[MSHR.scala:86:14]
input [2:0] io_sinkd_bits_opcode, // @[MSHR.scala:86:14]
input [2:0] io_sinkd_bits_param, // @[MSHR.scala:86:14]
input [3:0] io_sinkd_bits_source, // @[MSHR.scala:86:14]
input [2:0] io_sinkd_bits_sink, // @[MSHR.scala:86:14]
input io_sinkd_bits_denied, // @[MSHR.scala:86:14]
input io_sinke_valid, // @[MSHR.scala:86:14]
input [3:0] io_sinke_bits_sink, // @[MSHR.scala:86:14]
input [10:0] io_nestedwb_set, // @[MSHR.scala:86:14]
input [8:0] io_nestedwb_tag, // @[MSHR.scala:86:14]
input io_nestedwb_b_toN, // @[MSHR.scala:86:14]
input io_nestedwb_b_toB, // @[MSHR.scala:86:14]
input io_nestedwb_b_clr_dirty, // @[MSHR.scala:86:14]
input io_nestedwb_c_set_dirty // @[MSHR.scala:86:14]
);
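// The @[File.scala:line:col] annotations throughout this module are source locators
// emitted by the Chisel/FIRRTL toolchain; they point back to the Scala statements
// that generated each net and carry no semantic meaning in the Verilog.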
wire [8:0] final_meta_writeback_tag; // @[MSHR.scala:215:38]
wire final_meta_writeback_clients; // @[MSHR.scala:215:38]
wire [1:0] final_meta_writeback_state; // @[MSHR.scala:215:38]
wire final_meta_writeback_dirty; // @[MSHR.scala:215:38]
wire io_allocate_valid_0 = io_allocate_valid; // @[MSHR.scala:84:7]
wire io_allocate_bits_prio_0_0 = io_allocate_bits_prio_0; // @[MSHR.scala:84:7]
wire io_allocate_bits_prio_1_0 = io_allocate_bits_prio_1; // @[MSHR.scala:84:7]
wire io_allocate_bits_prio_2_0 = io_allocate_bits_prio_2; // @[MSHR.scala:84:7]
wire io_allocate_bits_control_0 = io_allocate_bits_control; // @[MSHR.scala:84:7]
wire [2:0] io_allocate_bits_opcode_0 = io_allocate_bits_opcode; // @[MSHR.scala:84:7]
wire [2:0] io_allocate_bits_param_0 = io_allocate_bits_param; // @[MSHR.scala:84:7]
wire [2:0] io_allocate_bits_size_0 = io_allocate_bits_size; // @[MSHR.scala:84:7]
wire [5:0] io_allocate_bits_source_0 = io_allocate_bits_source; // @[MSHR.scala:84:7]
wire [8:0] io_allocate_bits_tag_0 = io_allocate_bits_tag; // @[MSHR.scala:84:7]
wire [5:0] io_allocate_bits_offset_0 = io_allocate_bits_offset; // @[MSHR.scala:84:7]
wire [5:0] io_allocate_bits_put_0 = io_allocate_bits_put; // @[MSHR.scala:84:7]
wire [10:0] io_allocate_bits_set_0 = io_allocate_bits_set; // @[MSHR.scala:84:7]
wire io_allocate_bits_repeat_0 = io_allocate_bits_repeat; // @[MSHR.scala:84:7]
wire io_directory_valid_0 = io_directory_valid; // @[MSHR.scala:84:7]
wire io_directory_bits_dirty_0 = io_directory_bits_dirty; // @[MSHR.scala:84:7]
wire [1:0] io_directory_bits_state_0 = io_directory_bits_state; // @[MSHR.scala:84:7]
wire io_directory_bits_clients_0 = io_directory_bits_clients; // @[MSHR.scala:84:7]
wire [8:0] io_directory_bits_tag_0 = io_directory_bits_tag; // @[MSHR.scala:84:7]
wire io_directory_bits_hit_0 = io_directory_bits_hit; // @[MSHR.scala:84:7]
wire [3:0] io_directory_bits_way_0 = io_directory_bits_way; // @[MSHR.scala:84:7]
wire io_schedule_ready_0 = io_schedule_ready; // @[MSHR.scala:84:7]
wire io_sinkc_valid_0 = io_sinkc_valid; // @[MSHR.scala:84:7]
wire io_sinkc_bits_last_0 = io_sinkc_bits_last; // @[MSHR.scala:84:7]
wire [10:0] io_sinkc_bits_set_0 = io_sinkc_bits_set; // @[MSHR.scala:84:7]
wire [8:0] io_sinkc_bits_tag_0 = io_sinkc_bits_tag; // @[MSHR.scala:84:7]
wire [5:0] io_sinkc_bits_source_0 = io_sinkc_bits_source; // @[MSHR.scala:84:7]
wire [2:0] io_sinkc_bits_param_0 = io_sinkc_bits_param; // @[MSHR.scala:84:7]
wire io_sinkc_bits_data_0 = io_sinkc_bits_data; // @[MSHR.scala:84:7]
wire io_sinkd_valid_0 = io_sinkd_valid; // @[MSHR.scala:84:7]
wire io_sinkd_bits_last_0 = io_sinkd_bits_last; // @[MSHR.scala:84:7]
wire [2:0] io_sinkd_bits_opcode_0 = io_sinkd_bits_opcode; // @[MSHR.scala:84:7]
wire [2:0] io_sinkd_bits_param_0 = io_sinkd_bits_param; // @[MSHR.scala:84:7]
wire [3:0] io_sinkd_bits_source_0 = io_sinkd_bits_source; // @[MSHR.scala:84:7]
wire [2:0] io_sinkd_bits_sink_0 = io_sinkd_bits_sink; // @[MSHR.scala:84:7]
wire io_sinkd_bits_denied_0 = io_sinkd_bits_denied; // @[MSHR.scala:84:7]
wire io_sinke_valid_0 = io_sinke_valid; // @[MSHR.scala:84:7]
wire [3:0] io_sinke_bits_sink_0 = io_sinke_bits_sink; // @[MSHR.scala:84:7]
wire [10:0] io_nestedwb_set_0 = io_nestedwb_set; // @[MSHR.scala:84:7]
wire [8:0] io_nestedwb_tag_0 = io_nestedwb_tag; // @[MSHR.scala:84:7]
wire io_nestedwb_b_toN_0 = io_nestedwb_b_toN; // @[MSHR.scala:84:7]
wire io_nestedwb_b_toB_0 = io_nestedwb_b_toB; // @[MSHR.scala:84:7]
wire io_nestedwb_b_clr_dirty_0 = io_nestedwb_b_clr_dirty; // @[MSHR.scala:84:7]
wire io_nestedwb_c_set_dirty_0 = io_nestedwb_c_set_dirty; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_a_bits_source = 4'h0; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_c_bits_source = 4'h0; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_d_bits_sink = 4'h0; // @[MSHR.scala:84:7]
wire io_schedule_bits_x_bits_fail = 1'h0; // @[MSHR.scala:84:7]
wire _io_schedule_bits_c_valid_T_2 = 1'h0; // @[MSHR.scala:186:68]
wire _io_schedule_bits_c_valid_T_3 = 1'h0; // @[MSHR.scala:186:80]
wire invalid_dirty = 1'h0; // @[MSHR.scala:268:21]
wire invalid_clients = 1'h0; // @[MSHR.scala:268:21]
wire _excluded_client_T_7 = 1'h0; // @[Parameters.scala:279:137]
wire _after_T_4 = 1'h0; // @[MSHR.scala:323:11]
wire _new_skipProbe_T_6 = 1'h0; // @[Parameters.scala:279:137]
wire _prior_T_4 = 1'h0; // @[MSHR.scala:323:11]
wire [8:0] invalid_tag = 9'h0; // @[MSHR.scala:268:21]
wire [1:0] invalid_state = 2'h0; // @[MSHR.scala:268:21]
wire [1:0] _final_meta_writeback_state_T_11 = 2'h1; // @[MSHR.scala:240:70]
wire allocate_as_full_prio_0 = io_allocate_bits_prio_0_0; // @[MSHR.scala:84:7, :504:34]
wire allocate_as_full_prio_1 = io_allocate_bits_prio_1_0; // @[MSHR.scala:84:7, :504:34]
wire allocate_as_full_prio_2 = io_allocate_bits_prio_2_0; // @[MSHR.scala:84:7, :504:34]
wire allocate_as_full_control = io_allocate_bits_control_0; // @[MSHR.scala:84:7, :504:34]
wire [2:0] allocate_as_full_opcode = io_allocate_bits_opcode_0; // @[MSHR.scala:84:7, :504:34]
wire [2:0] allocate_as_full_param = io_allocate_bits_param_0; // @[MSHR.scala:84:7, :504:34]
wire [2:0] allocate_as_full_size = io_allocate_bits_size_0; // @[MSHR.scala:84:7, :504:34]
wire [5:0] allocate_as_full_source = io_allocate_bits_source_0; // @[MSHR.scala:84:7, :504:34]
wire [8:0] allocate_as_full_tag = io_allocate_bits_tag_0; // @[MSHR.scala:84:7, :504:34]
wire [5:0] allocate_as_full_offset = io_allocate_bits_offset_0; // @[MSHR.scala:84:7, :504:34]
wire [5:0] allocate_as_full_put = io_allocate_bits_put_0; // @[MSHR.scala:84:7, :504:34]
wire [10:0] allocate_as_full_set = io_allocate_bits_set_0; // @[MSHR.scala:84:7, :504:34]
wire _io_status_bits_blockB_T_8; // @[MSHR.scala:168:40]
wire _io_status_bits_nestB_T_4; // @[MSHR.scala:169:93]
wire _io_status_bits_blockC_T; // @[MSHR.scala:172:28]
wire _io_status_bits_nestC_T_5; // @[MSHR.scala:173:39]
wire _io_schedule_valid_T_5; // @[MSHR.scala:193:105]
wire _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:184:55]
wire _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:283:91]
wire _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:185:41]
wire [2:0] _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:286:41]
wire [8:0] _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:287:41]
wire _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:289:51]
wire _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:186:64]
wire [2:0] _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:290:41]
wire [2:0] _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:291:41]
wire _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:187:57]
wire [2:0] _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:298:41]
wire _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:188:43]
wire _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:189:40]
wire _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:190:66]
wire _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:310:41]
wire [1:0] _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:310:41]
wire _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:310:41]
wire [8:0] _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:310:41]
wire no_wait; // @[MSHR.scala:183:83]
wire [10:0] io_status_bits_set_0; // @[MSHR.scala:84:7]
wire [8:0] io_status_bits_tag_0; // @[MSHR.scala:84:7]
wire [3:0] io_status_bits_way_0; // @[MSHR.scala:84:7]
wire io_status_bits_blockB_0; // @[MSHR.scala:84:7]
wire io_status_bits_nestB_0; // @[MSHR.scala:84:7]
wire io_status_bits_blockC_0; // @[MSHR.scala:84:7]
wire io_status_bits_nestC_0; // @[MSHR.scala:84:7]
wire io_status_valid_0; // @[MSHR.scala:84:7]
wire [8:0] io_schedule_bits_a_bits_tag_0; // @[MSHR.scala:84:7]
wire [10:0] io_schedule_bits_a_bits_set_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_a_bits_param_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_a_bits_block_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_a_valid_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_b_bits_param_0; // @[MSHR.scala:84:7]
wire [8:0] io_schedule_bits_b_bits_tag_0; // @[MSHR.scala:84:7]
wire [10:0] io_schedule_bits_b_bits_set_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_b_bits_clients_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_c_bits_opcode_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_c_bits_param_0; // @[MSHR.scala:84:7]
wire [8:0] io_schedule_bits_c_bits_tag_0; // @[MSHR.scala:84:7]
wire [10:0] io_schedule_bits_c_bits_set_0; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_c_bits_way_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_c_bits_dirty_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_prio_0_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_prio_1_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_prio_2_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_control_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_opcode_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_param_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_size_0; // @[MSHR.scala:84:7]
wire [5:0] io_schedule_bits_d_bits_source_0; // @[MSHR.scala:84:7]
wire [8:0] io_schedule_bits_d_bits_tag_0; // @[MSHR.scala:84:7]
wire [5:0] io_schedule_bits_d_bits_offset_0; // @[MSHR.scala:84:7]
wire [5:0] io_schedule_bits_d_bits_put_0; // @[MSHR.scala:84:7]
wire [10:0] io_schedule_bits_d_bits_set_0; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_d_bits_way_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_bad_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_e_bits_sink_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_dir_bits_data_dirty_0; // @[MSHR.scala:84:7]
wire [1:0] io_schedule_bits_dir_bits_data_state_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_dir_bits_data_clients_0; // @[MSHR.scala:84:7]
wire [8:0] io_schedule_bits_dir_bits_data_tag_0; // @[MSHR.scala:84:7]
wire [10:0] io_schedule_bits_dir_bits_set_0; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_dir_bits_way_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_reload_0; // @[MSHR.scala:84:7]
wire io_schedule_valid_0; // @[MSHR.scala:84:7]
reg request_valid; // @[MSHR.scala:97:30]
assign io_status_valid_0 = request_valid; // @[MSHR.scala:84:7, :97:30]
reg request_prio_0; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_prio_0_0 = request_prio_0; // @[MSHR.scala:84:7, :98:20]
reg request_prio_1; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_prio_1_0 = request_prio_1; // @[MSHR.scala:84:7, :98:20]
reg request_prio_2; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_prio_2_0 = request_prio_2; // @[MSHR.scala:84:7, :98:20]
reg request_control; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_control_0 = request_control; // @[MSHR.scala:84:7, :98:20]
reg [2:0] request_opcode; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_opcode_0 = request_opcode; // @[MSHR.scala:84:7, :98:20]
reg [2:0] request_param; // @[MSHR.scala:98:20]
reg [2:0] request_size; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_size_0 = request_size; // @[MSHR.scala:84:7, :98:20]
reg [5:0] request_source; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_source_0 = request_source; // @[MSHR.scala:84:7, :98:20]
reg [8:0] request_tag; // @[MSHR.scala:98:20]
assign io_status_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_a_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_d_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20]
reg [5:0] request_offset; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_offset_0 = request_offset; // @[MSHR.scala:84:7, :98:20]
reg [5:0] request_put; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_put_0 = request_put; // @[MSHR.scala:84:7, :98:20]
reg [10:0] request_set; // @[MSHR.scala:98:20]
assign io_status_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_a_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_b_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_c_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_d_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_dir_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
reg meta_valid; // @[MSHR.scala:99:27]
reg meta_dirty; // @[MSHR.scala:100:17]
assign io_schedule_bits_c_bits_dirty_0 = meta_dirty; // @[MSHR.scala:84:7, :100:17]
reg [1:0] meta_state; // @[MSHR.scala:100:17]
reg meta_clients; // @[MSHR.scala:100:17]
wire _meta_no_clients_T = meta_clients; // @[MSHR.scala:100:17, :220:39]
wire evict_c = meta_clients; // @[MSHR.scala:100:17, :315:27]
wire before_c = meta_clients; // @[MSHR.scala:100:17, :315:27]
reg [8:0] meta_tag; // @[MSHR.scala:100:17]
assign io_schedule_bits_c_bits_tag_0 = meta_tag; // @[MSHR.scala:84:7, :100:17]
reg meta_hit; // @[MSHR.scala:100:17]
reg [3:0] meta_way; // @[MSHR.scala:100:17]
assign io_status_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
assign io_schedule_bits_c_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
assign io_schedule_bits_d_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
assign io_schedule_bits_dir_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
wire [3:0] final_meta_writeback_way = meta_way; // @[MSHR.scala:100:17, :215:38]
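// Transaction bookkeeping: the s_* registers record legs of the request that have
// already been scheduled and the w_* registers record responses that have already
// arrived; a cleared bit means that step is still outstanding.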
reg s_rprobe; // @[MSHR.scala:121:33]
reg w_rprobeackfirst; // @[MSHR.scala:122:33]
reg w_rprobeacklast; // @[MSHR.scala:123:33]
reg s_release; // @[MSHR.scala:124:33]
reg w_releaseack; // @[MSHR.scala:125:33]
reg s_pprobe; // @[MSHR.scala:126:33]
reg s_acquire; // @[MSHR.scala:127:33]
reg s_flush; // @[MSHR.scala:128:33]
reg w_grantfirst; // @[MSHR.scala:129:33]
reg w_grantlast; // @[MSHR.scala:130:33]
reg w_grant; // @[MSHR.scala:131:33]
reg w_pprobeackfirst; // @[MSHR.scala:132:33]
reg w_pprobeacklast; // @[MSHR.scala:133:33]
reg w_pprobeack; // @[MSHR.scala:134:33]
reg s_grantack; // @[MSHR.scala:136:33]
reg s_execute; // @[MSHR.scala:137:33]
reg w_grantack; // @[MSHR.scala:138:33]
reg s_writeback; // @[MSHR.scala:139:33]
reg [2:0] sink; // @[MSHR.scala:147:17]
assign io_schedule_bits_e_bits_sink_0 = sink; // @[MSHR.scala:84:7, :147:17]
reg gotT; // @[MSHR.scala:148:17]
reg bad_grant; // @[MSHR.scala:149:22]
assign io_schedule_bits_d_bits_bad_0 = bad_grant; // @[MSHR.scala:84:7, :149:22]
reg probes_done; // @[MSHR.scala:150:24]
reg probes_toN; // @[MSHR.scala:151:23]
reg probes_noT; // @[MSHR.scala:152:23]
wire _io_status_bits_blockB_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28]
wire _io_status_bits_blockB_T_1 = ~w_releaseack; // @[MSHR.scala:125:33, :168:45]
wire _io_status_bits_blockB_T_2 = ~w_rprobeacklast; // @[MSHR.scala:123:33, :168:62]
wire _io_status_bits_blockB_T_3 = _io_status_bits_blockB_T_1 | _io_status_bits_blockB_T_2; // @[MSHR.scala:168:{45,59,62}]
wire _io_status_bits_blockB_T_4 = ~w_pprobeacklast; // @[MSHR.scala:133:33, :168:82]
wire _io_status_bits_blockB_T_5 = _io_status_bits_blockB_T_3 | _io_status_bits_blockB_T_4; // @[MSHR.scala:168:{59,79,82}]
wire _io_status_bits_blockB_T_6 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103]
wire _io_status_bits_blockB_T_7 = _io_status_bits_blockB_T_5 & _io_status_bits_blockB_T_6; // @[MSHR.scala:168:{79,100,103}]
assign _io_status_bits_blockB_T_8 = _io_status_bits_blockB_T | _io_status_bits_blockB_T_7; // @[MSHR.scala:168:{28,40,100}]
assign io_status_bits_blockB_0 = _io_status_bits_blockB_T_8; // @[MSHR.scala:84:7, :168:40]
wire _io_status_bits_nestB_T = meta_valid & w_releaseack; // @[MSHR.scala:99:27, :125:33, :169:39]
wire _io_status_bits_nestB_T_1 = _io_status_bits_nestB_T & w_rprobeacklast; // @[MSHR.scala:123:33, :169:{39,55}]
wire _io_status_bits_nestB_T_2 = _io_status_bits_nestB_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :169:{55,74}]
wire _io_status_bits_nestB_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :169:96]
assign _io_status_bits_nestB_T_4 = _io_status_bits_nestB_T_2 & _io_status_bits_nestB_T_3; // @[MSHR.scala:169:{74,93,96}]
assign io_status_bits_nestB_0 = _io_status_bits_nestB_T_4; // @[MSHR.scala:84:7, :169:93]
assign _io_status_bits_blockC_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28, :172:28]
assign io_status_bits_blockC_0 = _io_status_bits_blockC_T; // @[MSHR.scala:84:7, :172:28]
wire _io_status_bits_nestC_T = ~w_rprobeackfirst; // @[MSHR.scala:122:33, :173:43]
wire _io_status_bits_nestC_T_1 = ~w_pprobeackfirst; // @[MSHR.scala:132:33, :173:64]
wire _io_status_bits_nestC_T_2 = _io_status_bits_nestC_T | _io_status_bits_nestC_T_1; // @[MSHR.scala:173:{43,61,64}]
wire _io_status_bits_nestC_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :173:85]
wire _io_status_bits_nestC_T_4 = _io_status_bits_nestC_T_2 | _io_status_bits_nestC_T_3; // @[MSHR.scala:173:{61,82,85}]
assign _io_status_bits_nestC_T_5 = meta_valid & _io_status_bits_nestC_T_4; // @[MSHR.scala:99:27, :173:{39,82}]
assign io_status_bits_nestC_0 = _io_status_bits_nestC_T_5; // @[MSHR.scala:84:7, :173:39]
wire _no_wait_T = w_rprobeacklast & w_releaseack; // @[MSHR.scala:123:33, :125:33, :183:33]
wire _no_wait_T_1 = _no_wait_T & w_grantlast; // @[MSHR.scala:130:33, :183:{33,49}]
wire _no_wait_T_2 = _no_wait_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :183:{49,64}]
assign no_wait = _no_wait_T_2 & w_grantack; // @[MSHR.scala:138:33, :183:{64,83}]
assign io_schedule_bits_reload_0 = no_wait; // @[MSHR.scala:84:7, :183:83]
wire _io_schedule_bits_a_valid_T = ~s_acquire; // @[MSHR.scala:127:33, :184:31]
wire _io_schedule_bits_a_valid_T_1 = _io_schedule_bits_a_valid_T & s_release; // @[MSHR.scala:124:33, :184:{31,42}]
assign _io_schedule_bits_a_valid_T_2 = _io_schedule_bits_a_valid_T_1 & s_pprobe; // @[MSHR.scala:126:33, :184:{42,55}]
assign io_schedule_bits_a_valid_0 = _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:84:7, :184:55]
wire _io_schedule_bits_b_valid_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31]
wire _io_schedule_bits_b_valid_T_1 = ~s_pprobe; // @[MSHR.scala:126:33, :185:44]
assign _io_schedule_bits_b_valid_T_2 = _io_schedule_bits_b_valid_T | _io_schedule_bits_b_valid_T_1; // @[MSHR.scala:185:{31,41,44}]
assign io_schedule_bits_b_valid_0 = _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:84:7, :185:41]
wire _io_schedule_bits_c_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32]
wire _io_schedule_bits_c_valid_T_1 = _io_schedule_bits_c_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :186:{32,43}]
assign _io_schedule_bits_c_valid_T_4 = _io_schedule_bits_c_valid_T_1; // @[MSHR.scala:186:{43,64}]
assign io_schedule_bits_c_valid_0 = _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:84:7, :186:64]
wire _io_schedule_bits_d_valid_T = ~s_execute; // @[MSHR.scala:137:33, :187:31]
wire _io_schedule_bits_d_valid_T_1 = _io_schedule_bits_d_valid_T & w_pprobeack; // @[MSHR.scala:134:33, :187:{31,42}]
assign _io_schedule_bits_d_valid_T_2 = _io_schedule_bits_d_valid_T_1 & w_grant; // @[MSHR.scala:131:33, :187:{42,57}]
assign io_schedule_bits_d_valid_0 = _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:84:7, :187:57]
wire _io_schedule_bits_e_valid_T = ~s_grantack; // @[MSHR.scala:136:33, :188:31]
assign _io_schedule_bits_e_valid_T_1 = _io_schedule_bits_e_valid_T & w_grantfirst; // @[MSHR.scala:129:33, :188:{31,43}]
assign io_schedule_bits_e_valid_0 = _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:84:7, :188:43]
wire _io_schedule_bits_x_valid_T = ~s_flush; // @[MSHR.scala:128:33, :189:31]
assign _io_schedule_bits_x_valid_T_1 = _io_schedule_bits_x_valid_T & w_releaseack; // @[MSHR.scala:125:33, :189:{31,40}]
assign io_schedule_bits_x_valid_0 = _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:84:7, :189:40]
wire _io_schedule_bits_dir_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :190:34]
wire _io_schedule_bits_dir_valid_T_1 = _io_schedule_bits_dir_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :190:{34,45}]
wire _io_schedule_bits_dir_valid_T_2 = ~s_writeback; // @[MSHR.scala:139:33, :190:70]
wire _io_schedule_bits_dir_valid_T_3 = _io_schedule_bits_dir_valid_T_2 & no_wait; // @[MSHR.scala:183:83, :190:{70,83}]
assign _io_schedule_bits_dir_valid_T_4 = _io_schedule_bits_dir_valid_T_1 | _io_schedule_bits_dir_valid_T_3; // @[MSHR.scala:190:{45,66,83}]
assign io_schedule_bits_dir_valid_0 = _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:84:7, :190:66]
wire _io_schedule_valid_T = io_schedule_bits_a_valid_0 | io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7, :192:49]
wire _io_schedule_valid_T_1 = _io_schedule_valid_T | io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7, :192:{49,77}]
wire _io_schedule_valid_T_2 = _io_schedule_valid_T_1 | io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7, :192:{77,105}]
wire _io_schedule_valid_T_3 = _io_schedule_valid_T_2 | io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7, :192:105, :193:49]
wire _io_schedule_valid_T_4 = _io_schedule_valid_T_3 | io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7, :193:{49,77}]
assign _io_schedule_valid_T_5 = _io_schedule_valid_T_4 | io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7, :193:{77,105}]
assign io_schedule_valid_0 = _io_schedule_valid_T_5; // @[MSHR.scala:84:7, :193:105]
wire _io_schedule_bits_dir_bits_data_WIRE_dirty = final_meta_writeback_dirty; // @[MSHR.scala:215:38, :310:71]
wire [1:0] _io_schedule_bits_dir_bits_data_WIRE_state = final_meta_writeback_state; // @[MSHR.scala:215:38, :310:71]
wire _io_schedule_bits_dir_bits_data_WIRE_clients = final_meta_writeback_clients; // @[MSHR.scala:215:38, :310:71]
wire after_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27]
wire prior_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27]
wire [8:0] _io_schedule_bits_dir_bits_data_WIRE_tag = final_meta_writeback_tag; // @[MSHR.scala:215:38, :310:71]
wire final_meta_writeback_hit; // @[MSHR.scala:215:38]
wire req_clientBit = request_source == 6'h28; // @[Parameters.scala:46:9]
wire _req_needT_T = request_opcode[2]; // @[Parameters.scala:269:12]
wire _final_meta_writeback_dirty_T_3 = request_opcode[2]; // @[Parameters.scala:269:12]
wire _req_needT_T_1 = ~_req_needT_T; // @[Parameters.scala:269:{5,12}]
wire _GEN = request_opcode == 3'h5; // @[Parameters.scala:270:13]
wire _req_needT_T_2; // @[Parameters.scala:270:13]
assign _req_needT_T_2 = _GEN; // @[Parameters.scala:270:13]
wire _excluded_client_T_6; // @[Parameters.scala:279:117]
assign _excluded_client_T_6 = _GEN; // @[Parameters.scala:270:13, :279:117]
wire _GEN_0 = request_param == 3'h1; // @[Parameters.scala:270:42]
wire _req_needT_T_3; // @[Parameters.scala:270:42]
assign _req_needT_T_3 = _GEN_0; // @[Parameters.scala:270:42]
wire _final_meta_writeback_clients_T; // @[Parameters.scala:282:11]
assign _final_meta_writeback_clients_T = _GEN_0; // @[Parameters.scala:270:42, :282:11]
wire _io_schedule_bits_d_bits_param_T_7; // @[MSHR.scala:299:79]
assign _io_schedule_bits_d_bits_param_T_7 = _GEN_0; // @[Parameters.scala:270:42]
wire _req_needT_T_4 = _req_needT_T_2 & _req_needT_T_3; // @[Parameters.scala:270:{13,33,42}]
wire _req_needT_T_5 = _req_needT_T_1 | _req_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33]
wire _GEN_1 = request_opcode == 3'h6; // @[Parameters.scala:271:14]
wire _req_needT_T_6; // @[Parameters.scala:271:14]
assign _req_needT_T_6 = _GEN_1; // @[Parameters.scala:271:14]
wire _req_acquire_T; // @[MSHR.scala:219:36]
assign _req_acquire_T = _GEN_1; // @[Parameters.scala:271:14]
wire _excluded_client_T_1; // @[Parameters.scala:279:12]
assign _excluded_client_T_1 = _GEN_1; // @[Parameters.scala:271:14, :279:12]
wire _req_needT_T_7 = &request_opcode; // @[Parameters.scala:271:52]
wire _req_needT_T_8 = _req_needT_T_6 | _req_needT_T_7; // @[Parameters.scala:271:{14,42,52}]
wire _req_needT_T_9 = |request_param; // @[Parameters.scala:271:89]
wire _req_needT_T_10 = _req_needT_T_8 & _req_needT_T_9; // @[Parameters.scala:271:{42,80,89}]
wire req_needT = _req_needT_T_5 | _req_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80]
wire _req_acquire_T_1 = &request_opcode; // @[Parameters.scala:271:52]
wire req_acquire = _req_acquire_T | _req_acquire_T_1; // @[MSHR.scala:219:{36,53,71}]
wire meta_no_clients = ~_meta_no_clients_T; // @[MSHR.scala:220:{25,39}]
wire _req_promoteT_T = &meta_state; // @[MSHR.scala:100:17, :221:81]
wire _req_promoteT_T_1 = meta_no_clients & _req_promoteT_T; // @[MSHR.scala:220:25, :221:{67,81}]
wire _req_promoteT_T_2 = meta_hit ? _req_promoteT_T_1 : gotT; // @[MSHR.scala:100:17, :148:17, :221:{40,67}]
wire req_promoteT = req_acquire & _req_promoteT_T_2; // @[MSHR.scala:219:53, :221:{34,40}]
wire _final_meta_writeback_dirty_T = request_opcode[0]; // @[MSHR.scala:98:20, :224:65]
wire _final_meta_writeback_dirty_T_1 = meta_dirty | _final_meta_writeback_dirty_T; // @[MSHR.scala:100:17, :224:{48,65}]
wire _final_meta_writeback_state_T = request_param != 3'h3; // @[MSHR.scala:98:20, :225:55]
wire _GEN_2 = meta_state == 2'h2; // @[MSHR.scala:100:17, :225:78]
wire _final_meta_writeback_state_T_1; // @[MSHR.scala:225:78]
assign _final_meta_writeback_state_T_1 = _GEN_2; // @[MSHR.scala:225:78]
wire _final_meta_writeback_state_T_12; // @[MSHR.scala:240:70]
assign _final_meta_writeback_state_T_12 = _GEN_2; // @[MSHR.scala:225:78, :240:70]
wire _evict_T_2; // @[MSHR.scala:317:26]
assign _evict_T_2 = _GEN_2; // @[MSHR.scala:225:78, :317:26]
wire _before_T_1; // @[MSHR.scala:317:26]
assign _before_T_1 = _GEN_2; // @[MSHR.scala:225:78, :317:26]
wire _final_meta_writeback_state_T_2 = _final_meta_writeback_state_T & _final_meta_writeback_state_T_1; // @[MSHR.scala:225:{55,64,78}]
wire [1:0] _final_meta_writeback_state_T_3 = _final_meta_writeback_state_T_2 ? 2'h3 : meta_state; // @[MSHR.scala:100:17, :225:{40,64}]
wire _GEN_3 = request_param == 3'h2; // @[Parameters.scala:282:43]
wire _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:43]
assign _final_meta_writeback_clients_T_1 = _GEN_3; // @[Parameters.scala:282:43]
wire _io_schedule_bits_d_bits_param_T_5; // @[MSHR.scala:299:79]
assign _io_schedule_bits_d_bits_param_T_5 = _GEN_3; // @[Parameters.scala:282:43]
wire _final_meta_writeback_clients_T_2 = _final_meta_writeback_clients_T | _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:{11,34,43}]
wire _final_meta_writeback_clients_T_3 = request_param == 3'h5; // @[Parameters.scala:282:75]
wire _final_meta_writeback_clients_T_4 = _final_meta_writeback_clients_T_2 | _final_meta_writeback_clients_T_3; // @[Parameters.scala:282:{34,66,75}]
wire _final_meta_writeback_clients_T_5 = _final_meta_writeback_clients_T_4 & req_clientBit; // @[Parameters.scala:46:9]
wire _final_meta_writeback_clients_T_6 = ~_final_meta_writeback_clients_T_5; // @[MSHR.scala:226:{52,56}]
wire _final_meta_writeback_clients_T_7 = meta_clients & _final_meta_writeback_clients_T_6; // @[MSHR.scala:100:17, :226:{50,52}]
wire _final_meta_writeback_clients_T_8 = ~probes_toN; // @[MSHR.scala:151:23, :232:54]
wire _final_meta_writeback_clients_T_9 = meta_clients & _final_meta_writeback_clients_T_8; // @[MSHR.scala:100:17, :232:{52,54}]
wire _final_meta_writeback_dirty_T_2 = meta_hit & meta_dirty; // @[MSHR.scala:100:17, :236:45]
wire _final_meta_writeback_dirty_T_4 = ~_final_meta_writeback_dirty_T_3; // @[MSHR.scala:236:{63,78}]
wire _final_meta_writeback_dirty_T_5 = _final_meta_writeback_dirty_T_2 | _final_meta_writeback_dirty_T_4; // @[MSHR.scala:236:{45,60,63}]
wire [1:0] _GEN_4 = {1'h1, ~req_acquire}; // @[MSHR.scala:219:53, :238:40]
wire [1:0] _final_meta_writeback_state_T_4; // @[MSHR.scala:238:40]
assign _final_meta_writeback_state_T_4 = _GEN_4; // @[MSHR.scala:238:40]
wire [1:0] _final_meta_writeback_state_T_6; // @[MSHR.scala:239:65]
assign _final_meta_writeback_state_T_6 = _GEN_4; // @[MSHR.scala:238:40, :239:65]
wire _final_meta_writeback_state_T_5 = ~meta_hit; // @[MSHR.scala:100:17, :239:41]
wire [1:0] _final_meta_writeback_state_T_7 = gotT ? _final_meta_writeback_state_T_6 : 2'h1; // @[MSHR.scala:148:17, :239:{55,65}]
wire _final_meta_writeback_state_T_8 = meta_no_clients & req_acquire; // @[MSHR.scala:219:53, :220:25, :244:72]
wire [1:0] _final_meta_writeback_state_T_9 = {1'h1, ~_final_meta_writeback_state_T_8}; // @[MSHR.scala:244:{55,72}]
wire _GEN_5 = meta_state == 2'h1; // @[MSHR.scala:100:17, :240:70]
wire _final_meta_writeback_state_T_10; // @[MSHR.scala:240:70]
assign _final_meta_writeback_state_T_10 = _GEN_5; // @[MSHR.scala:240:70]
wire _io_schedule_bits_c_bits_param_T; // @[MSHR.scala:291:53]
assign _io_schedule_bits_c_bits_param_T = _GEN_5; // @[MSHR.scala:240:70, :291:53]
wire _evict_T_1; // @[MSHR.scala:317:26]
assign _evict_T_1 = _GEN_5; // @[MSHR.scala:240:70, :317:26]
wire _before_T; // @[MSHR.scala:317:26]
assign _before_T = _GEN_5; // @[MSHR.scala:240:70, :317:26]
wire [1:0] _final_meta_writeback_state_T_13 = {_final_meta_writeback_state_T_12, 1'h1}; // @[MSHR.scala:240:70]
wire _final_meta_writeback_state_T_14 = &meta_state; // @[MSHR.scala:100:17, :221:81, :240:70]
wire [1:0] _final_meta_writeback_state_T_15 = _final_meta_writeback_state_T_14 ? _final_meta_writeback_state_T_9 : _final_meta_writeback_state_T_13; // @[MSHR.scala:240:70, :244:55]
wire [1:0] _final_meta_writeback_state_T_16 = _final_meta_writeback_state_T_5 ? _final_meta_writeback_state_T_7 : _final_meta_writeback_state_T_15; // @[MSHR.scala:239:{40,41,55}, :240:70]
wire [1:0] _final_meta_writeback_state_T_17 = req_needT ? _final_meta_writeback_state_T_4 : _final_meta_writeback_state_T_16; // @[Parameters.scala:270:70]
wire _final_meta_writeback_clients_T_10 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :245:66]
wire _final_meta_writeback_clients_T_11 = meta_clients & _final_meta_writeback_clients_T_10; // @[MSHR.scala:100:17, :245:{64,66}]
wire _final_meta_writeback_clients_T_12 = meta_hit & _final_meta_writeback_clients_T_11; // @[MSHR.scala:100:17, :245:{40,64}]
wire _final_meta_writeback_clients_T_13 = req_acquire & req_clientBit; // @[Parameters.scala:46:9]
wire _final_meta_writeback_clients_T_14 = _final_meta_writeback_clients_T_12 | _final_meta_writeback_clients_T_13; // @[MSHR.scala:245:{40,84}, :246:40]
assign final_meta_writeback_tag = request_prio_2 | request_control ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :215:38, :223:52, :228:53, :247:30]
wire _final_meta_writeback_clients_T_15 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :258:54]
wire _final_meta_writeback_clients_T_16 = meta_clients & _final_meta_writeback_clients_T_15; // @[MSHR.scala:100:17, :258:{52,54}]
assign final_meta_writeback_hit = bad_grant ? meta_hit : request_prio_2 | ~request_control; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :227:34, :228:53, :234:30, :248:30, :251:20, :252:21]
assign final_meta_writeback_dirty = ~bad_grant & (request_prio_2 ? _final_meta_writeback_dirty_T_1 : request_control ? ~meta_hit & meta_dirty : _final_meta_writeback_dirty_T_5); // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :224:{34,48}, :228:53, :229:21, :230:36, :236:{32,60}, :251:20, :252:21]
assign final_meta_writeback_state = bad_grant ? {1'h0, meta_hit} : request_prio_2 ? _final_meta_writeback_state_T_3 : request_control ? (meta_hit ? 2'h0 : meta_state) : _final_meta_writeback_state_T_17; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :225:{34,40}, :228:53, :229:21, :231:36, :237:{32,38}, :251:20, :252:21, :257:36, :263:36]
assign final_meta_writeback_clients = bad_grant ? meta_hit & _final_meta_writeback_clients_T_16 : request_prio_2 ? _final_meta_writeback_clients_T_7 : request_control ? (meta_hit ? _final_meta_writeback_clients_T_9 : meta_clients) : _final_meta_writeback_clients_T_14; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :226:{34,50}, :228:53, :229:21, :232:{36,52}, :245:{34,84}, :251:20, :252:21, :258:{36,52}, :264:36]
wire _honour_BtoT_T = meta_clients & req_clientBit; // @[Parameters.scala:46:9]
wire _honour_BtoT_T_1 = _honour_BtoT_T; // @[MSHR.scala:276:{47,64}]
wire honour_BtoT = meta_hit & _honour_BtoT_T_1; // @[MSHR.scala:100:17, :276:{30,64}]
wire _excluded_client_T = meta_hit & request_prio_0; // @[MSHR.scala:98:20, :100:17, :279:38]
wire _excluded_client_T_2 = &request_opcode; // @[Parameters.scala:271:52, :279:50]
wire _excluded_client_T_3 = _excluded_client_T_1 | _excluded_client_T_2; // @[Parameters.scala:279:{12,40,50}]
wire _excluded_client_T_4 = request_opcode == 3'h4; // @[Parameters.scala:279:87]
wire _excluded_client_T_5 = _excluded_client_T_3 | _excluded_client_T_4; // @[Parameters.scala:279:{40,77,87}]
wire _excluded_client_T_8 = _excluded_client_T_5; // @[Parameters.scala:279:{77,106}]
wire _excluded_client_T_9 = _excluded_client_T & _excluded_client_T_8; // @[Parameters.scala:279:106]
wire excluded_client = _excluded_client_T_9 & req_clientBit; // @[Parameters.scala:46:9]
wire [1:0] _io_schedule_bits_a_bits_param_T = meta_hit ? 2'h2 : 2'h1; // @[MSHR.scala:100:17, :282:56]
wire [1:0] _io_schedule_bits_a_bits_param_T_1 = req_needT ? _io_schedule_bits_a_bits_param_T : 2'h0; // @[Parameters.scala:270:70]
assign io_schedule_bits_a_bits_param_0 = {1'h0, _io_schedule_bits_a_bits_param_T_1}; // @[MSHR.scala:84:7, :282:{35,41}]
wire _io_schedule_bits_a_bits_block_T = request_size != 3'h6; // @[MSHR.scala:98:20, :283:51]
wire _io_schedule_bits_a_bits_block_T_1 = request_opcode == 3'h0; // @[MSHR.scala:98:20, :284:55]
wire _io_schedule_bits_a_bits_block_T_2 = &request_opcode; // @[Parameters.scala:271:52]
wire _io_schedule_bits_a_bits_block_T_3 = _io_schedule_bits_a_bits_block_T_1 | _io_schedule_bits_a_bits_block_T_2; // @[MSHR.scala:284:{55,71,89}]
wire _io_schedule_bits_a_bits_block_T_4 = ~_io_schedule_bits_a_bits_block_T_3; // @[MSHR.scala:284:{38,71}]
assign _io_schedule_bits_a_bits_block_T_5 = _io_schedule_bits_a_bits_block_T | _io_schedule_bits_a_bits_block_T_4; // @[MSHR.scala:283:{51,91}, :284:38]
assign io_schedule_bits_a_bits_block_0 = _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:84:7, :283:91]
wire _io_schedule_bits_b_bits_param_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :286:42]
wire [1:0] _io_schedule_bits_b_bits_param_T_1 = req_needT ? 2'h2 : 2'h1; // @[Parameters.scala:270:70]
wire [2:0] _io_schedule_bits_b_bits_param_T_2 = request_prio_1 ? request_param : {1'h0, _io_schedule_bits_b_bits_param_T_1}; // @[MSHR.scala:98:20, :286:{61,97}]
assign _io_schedule_bits_b_bits_param_T_3 = _io_schedule_bits_b_bits_param_T ? 3'h2 : _io_schedule_bits_b_bits_param_T_2; // @[MSHR.scala:286:{41,42,61}]
assign io_schedule_bits_b_bits_param_0 = _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:84:7, :286:41]
wire _io_schedule_bits_b_bits_tag_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :287:42]
assign _io_schedule_bits_b_bits_tag_T_1 = _io_schedule_bits_b_bits_tag_T ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :287:{41,42}]
assign io_schedule_bits_b_bits_tag_0 = _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:84:7, :287:41]
wire _io_schedule_bits_b_bits_clients_T = ~excluded_client; // @[MSHR.scala:279:28, :289:53]
assign _io_schedule_bits_b_bits_clients_T_1 = meta_clients & _io_schedule_bits_b_bits_clients_T; // @[MSHR.scala:100:17, :289:{51,53}]
assign io_schedule_bits_b_bits_clients_0 = _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:84:7, :289:51]
assign _io_schedule_bits_c_bits_opcode_T = {2'h3, meta_dirty}; // @[MSHR.scala:100:17, :290:41]
assign io_schedule_bits_c_bits_opcode_0 = _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:84:7, :290:41]
assign _io_schedule_bits_c_bits_param_T_1 = _io_schedule_bits_c_bits_param_T ? 3'h2 : 3'h1; // @[MSHR.scala:291:{41,53}]
assign io_schedule_bits_c_bits_param_0 = _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:84:7, :291:41]
wire _io_schedule_bits_d_bits_param_T = ~req_acquire; // @[MSHR.scala:219:53, :298:42]
wire [1:0] _io_schedule_bits_d_bits_param_T_1 = {1'h0, req_promoteT}; // @[MSHR.scala:221:34, :300:53]
wire [1:0] _io_schedule_bits_d_bits_param_T_2 = honour_BtoT ? 2'h2 : 2'h1; // @[MSHR.scala:276:30, :301:53]
wire _io_schedule_bits_d_bits_param_T_3 = ~(|request_param); // @[Parameters.scala:271:89]
wire [2:0] _io_schedule_bits_d_bits_param_T_4 = _io_schedule_bits_d_bits_param_T_3 ? {1'h0, _io_schedule_bits_d_bits_param_T_1} : request_param; // @[MSHR.scala:98:20, :299:79, :300:53]
wire [2:0] _io_schedule_bits_d_bits_param_T_6 = _io_schedule_bits_d_bits_param_T_5 ? {1'h0, _io_schedule_bits_d_bits_param_T_2} : _io_schedule_bits_d_bits_param_T_4; // @[MSHR.scala:299:79, :301:53]
wire [2:0] _io_schedule_bits_d_bits_param_T_8 = _io_schedule_bits_d_bits_param_T_7 ? 3'h1 : _io_schedule_bits_d_bits_param_T_6; // @[MSHR.scala:299:79]
assign _io_schedule_bits_d_bits_param_T_9 = _io_schedule_bits_d_bits_param_T ? request_param : _io_schedule_bits_d_bits_param_T_8; // @[MSHR.scala:98:20, :298:{41,42}, :299:79]
assign io_schedule_bits_d_bits_param_0 = _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:84:7, :298:41]
wire _io_schedule_bits_dir_bits_data_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :310:42]
assign _io_schedule_bits_dir_bits_data_T_1_dirty = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_dirty; // @[MSHR.scala:310:{41,42,71}]
assign _io_schedule_bits_dir_bits_data_T_1_state = _io_schedule_bits_dir_bits_data_T ? 2'h0 : _io_schedule_bits_dir_bits_data_WIRE_state; // @[MSHR.scala:310:{41,42,71}]
assign _io_schedule_bits_dir_bits_data_T_1_clients = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_clients; // @[MSHR.scala:310:{41,42,71}]
assign _io_schedule_bits_dir_bits_data_T_1_tag = _io_schedule_bits_dir_bits_data_T ? 9'h0 : _io_schedule_bits_dir_bits_data_WIRE_tag; // @[MSHR.scala:310:{41,42,71}]
assign io_schedule_bits_dir_bits_data_dirty_0 = _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:84:7, :310:41]
assign io_schedule_bits_dir_bits_data_state_0 = _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:84:7, :310:41]
assign io_schedule_bits_dir_bits_data_clients_0 = _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:84:7, :310:41]
assign io_schedule_bits_dir_bits_data_tag_0 = _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:84:7, :310:41]
wire _evict_T = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :338:32]
wire [3:0] evict; // @[MSHR.scala:314:26]
wire _evict_out_T = ~evict_c; // @[MSHR.scala:315:27, :318:32]
wire [1:0] _GEN_6 = {1'h1, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32]
wire [1:0] _evict_out_T_1; // @[MSHR.scala:319:32]
assign _evict_out_T_1 = _GEN_6; // @[MSHR.scala:319:32]
wire [1:0] _before_out_T_1; // @[MSHR.scala:319:32]
assign _before_out_T_1 = _GEN_6; // @[MSHR.scala:319:32]
wire _evict_T_3 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26]
wire [2:0] _GEN_7 = {2'h2, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:39]
wire [2:0] _evict_out_T_2; // @[MSHR.scala:320:39]
assign _evict_out_T_2 = _GEN_7; // @[MSHR.scala:320:39]
wire [2:0] _before_out_T_2; // @[MSHR.scala:320:39]
assign _before_out_T_2 = _GEN_7; // @[MSHR.scala:320:39]
wire [2:0] _GEN_8 = {2'h3, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:76]
wire [2:0] _evict_out_T_3; // @[MSHR.scala:320:76]
assign _evict_out_T_3 = _GEN_8; // @[MSHR.scala:320:76]
wire [2:0] _before_out_T_3; // @[MSHR.scala:320:76]
assign _before_out_T_3 = _GEN_8; // @[MSHR.scala:320:76]
wire [2:0] _evict_out_T_4 = evict_c ? _evict_out_T_2 : _evict_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
wire _evict_T_4 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26]
wire _evict_T_5 = ~_evict_T; // @[MSHR.scala:323:11, :338:32]
assign evict = _evict_T_5 ? 4'h8 : _evict_T_1 ? {3'h0, _evict_out_T} : _evict_T_2 ? {2'h0, _evict_out_T_1} : _evict_T_3 ? {1'h0, _evict_out_T_4} : {_evict_T_4, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}]
wire [3:0] before_0; // @[MSHR.scala:314:26]
wire _before_out_T = ~before_c; // @[MSHR.scala:315:27, :318:32]
wire _before_T_2 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26]
wire [2:0] _before_out_T_4 = before_c ? _before_out_T_2 : _before_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
wire _before_T_3 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26]
wire _before_T_4 = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :323:11]
assign before_0 = _before_T_4 ? 4'h8 : _before_T ? {3'h0, _before_out_T} : _before_T_1 ? {2'h0, _before_out_T_1} : _before_T_2 ? {1'h0, _before_out_T_4} : {_before_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}]
wire [3:0] after; // @[MSHR.scala:314:26]
wire _GEN_9 = final_meta_writeback_state == 2'h1; // @[MSHR.scala:215:38, :317:26]
wire _after_T; // @[MSHR.scala:317:26]
assign _after_T = _GEN_9; // @[MSHR.scala:317:26]
wire _prior_T; // @[MSHR.scala:317:26]
assign _prior_T = _GEN_9; // @[MSHR.scala:317:26]
wire _after_out_T = ~after_c; // @[MSHR.scala:315:27, :318:32]
wire _GEN_10 = final_meta_writeback_state == 2'h2; // @[MSHR.scala:215:38, :317:26]
wire _after_T_1; // @[MSHR.scala:317:26]
assign _after_T_1 = _GEN_10; // @[MSHR.scala:317:26]
wire _prior_T_1; // @[MSHR.scala:317:26]
assign _prior_T_1 = _GEN_10; // @[MSHR.scala:317:26]
wire [1:0] _GEN_11 = {1'h1, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32]
wire [1:0] _after_out_T_1; // @[MSHR.scala:319:32]
assign _after_out_T_1 = _GEN_11; // @[MSHR.scala:319:32]
wire [1:0] _prior_out_T_1; // @[MSHR.scala:319:32]
assign _prior_out_T_1 = _GEN_11; // @[MSHR.scala:319:32]
wire _after_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26]
wire [2:0] _GEN_12 = {2'h2, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:39]
wire [2:0] _after_out_T_2; // @[MSHR.scala:320:39]
assign _after_out_T_2 = _GEN_12; // @[MSHR.scala:320:39]
wire [2:0] _prior_out_T_2; // @[MSHR.scala:320:39]
assign _prior_out_T_2 = _GEN_12; // @[MSHR.scala:320:39]
wire [2:0] _GEN_13 = {2'h3, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:76]
wire [2:0] _after_out_T_3; // @[MSHR.scala:320:76]
assign _after_out_T_3 = _GEN_13; // @[MSHR.scala:320:76]
wire [2:0] _prior_out_T_3; // @[MSHR.scala:320:76]
assign _prior_out_T_3 = _GEN_13; // @[MSHR.scala:320:76]
wire [2:0] _after_out_T_4 = after_c ? _after_out_T_2 : _after_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
wire _GEN_14 = final_meta_writeback_state == 2'h0; // @[MSHR.scala:215:38, :317:26]
wire _after_T_3; // @[MSHR.scala:317:26]
assign _after_T_3 = _GEN_14; // @[MSHR.scala:317:26]
wire _prior_T_3; // @[MSHR.scala:317:26]
assign _prior_T_3 = _GEN_14; // @[MSHR.scala:317:26]
assign after = _after_T ? {3'h0, _after_out_T} : _after_T_1 ? {2'h0, _after_out_T_1} : _after_T_2 ? {1'h0, _after_out_T_4} : {_after_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26]
wire probe_bit = io_sinkc_bits_source_0 == 6'h28; // @[Parameters.scala:46:9]
wire _GEN_15 = probes_done | probe_bit; // @[Parameters.scala:46:9]
wire _last_probe_T; // @[MSHR.scala:459:33]
assign _last_probe_T = _GEN_15; // @[MSHR.scala:459:33]
wire _probes_done_T; // @[MSHR.scala:467:32]
assign _probes_done_T = _GEN_15; // @[MSHR.scala:459:33, :467:32]
wire _last_probe_T_1 = ~excluded_client; // @[MSHR.scala:279:28, :289:53, :459:66]
wire _last_probe_T_2 = meta_clients & _last_probe_T_1; // @[MSHR.scala:100:17, :459:{64,66}]
wire last_probe = _last_probe_T == _last_probe_T_2; // @[MSHR.scala:459:{33,46,64}]
wire _probe_toN_T = io_sinkc_bits_param_0 == 3'h1; // @[Parameters.scala:282:11]
wire _probe_toN_T_1 = io_sinkc_bits_param_0 == 3'h2; // @[Parameters.scala:282:43]
wire _probe_toN_T_2 = _probe_toN_T | _probe_toN_T_1; // @[Parameters.scala:282:{11,34,43}]
wire _probe_toN_T_3 = io_sinkc_bits_param_0 == 3'h5; // @[Parameters.scala:282:75]
wire probe_toN = _probe_toN_T_2 | _probe_toN_T_3; // @[Parameters.scala:282:{34,66,75}]
wire _probes_toN_T = probe_toN & probe_bit; // @[Parameters.scala:46:9]
wire _probes_toN_T_1 = probes_toN | _probes_toN_T; // @[MSHR.scala:151:23, :468:{30,35}]
wire _probes_noT_T = io_sinkc_bits_param_0 != 3'h3; // @[MSHR.scala:84:7, :469:53]
wire _probes_noT_T_1 = probes_noT | _probes_noT_T; // @[MSHR.scala:152:23, :469:{30,53}]
wire _w_rprobeackfirst_T = w_rprobeackfirst | last_probe; // @[MSHR.scala:122:33, :459:46, :470:42]
wire _GEN_16 = last_probe & io_sinkc_bits_last_0; // @[MSHR.scala:84:7, :459:46, :471:55]
wire _w_rprobeacklast_T; // @[MSHR.scala:471:55]
assign _w_rprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55]
wire _w_pprobeacklast_T; // @[MSHR.scala:473:55]
assign _w_pprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55, :473:55]
wire _w_rprobeacklast_T_1 = w_rprobeacklast | _w_rprobeacklast_T; // @[MSHR.scala:123:33, :471:{40,55}]
wire _w_pprobeackfirst_T = w_pprobeackfirst | last_probe; // @[MSHR.scala:132:33, :459:46, :472:42]
wire _w_pprobeacklast_T_1 = w_pprobeacklast | _w_pprobeacklast_T; // @[MSHR.scala:133:33, :473:{40,55}]
wire _set_pprobeack_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77]
wire _set_pprobeack_T_1 = io_sinkc_bits_last_0 | _set_pprobeack_T; // @[MSHR.scala:84:7, :475:{59,77}]
wire set_pprobeack = last_probe & _set_pprobeack_T_1; // @[MSHR.scala:459:46, :475:{36,59}]
wire _w_pprobeack_T = w_pprobeack | set_pprobeack; // @[MSHR.scala:134:33, :475:36, :476:32]
wire _w_grant_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77, :490:33]
wire _w_grant_T_1 = _w_grant_T | io_sinkd_bits_last_0; // @[MSHR.scala:84:7, :490:{33,41}]
wire _gotT_T = io_sinkd_bits_param_0 == 3'h0; // @[MSHR.scala:84:7, :493:35]
wire _new_meta_T = io_allocate_valid_0 & io_allocate_bits_repeat_0; // @[MSHR.scala:84:7, :505:40]
wire new_meta_dirty = _new_meta_T ? final_meta_writeback_dirty : io_directory_bits_dirty_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire [1:0] new_meta_state = _new_meta_T ? final_meta_writeback_state : io_directory_bits_state_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire new_meta_clients = _new_meta_T ? final_meta_writeback_clients : io_directory_bits_clients_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire [8:0] new_meta_tag = _new_meta_T ? final_meta_writeback_tag : io_directory_bits_tag_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire new_meta_hit = _new_meta_T ? final_meta_writeback_hit : io_directory_bits_hit_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire [3:0] new_meta_way = _new_meta_T ? final_meta_writeback_way : io_directory_bits_way_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire new_request_prio_0 = io_allocate_valid_0 ? allocate_as_full_prio_0 : request_prio_0; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire new_request_prio_1 = io_allocate_valid_0 ? allocate_as_full_prio_1 : request_prio_1; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire new_request_prio_2 = io_allocate_valid_0 ? allocate_as_full_prio_2 : request_prio_2; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire new_request_control = io_allocate_valid_0 ? allocate_as_full_control : request_control; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [2:0] new_request_opcode = io_allocate_valid_0 ? allocate_as_full_opcode : request_opcode; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [2:0] new_request_param = io_allocate_valid_0 ? allocate_as_full_param : request_param; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [2:0] new_request_size = io_allocate_valid_0 ? allocate_as_full_size : request_size; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [5:0] new_request_source = io_allocate_valid_0 ? allocate_as_full_source : request_source; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [8:0] new_request_tag = io_allocate_valid_0 ? allocate_as_full_tag : request_tag; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [5:0] new_request_offset = io_allocate_valid_0 ? allocate_as_full_offset : request_offset; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [5:0] new_request_put = io_allocate_valid_0 ? allocate_as_full_put : request_put; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [10:0] new_request_set = io_allocate_valid_0 ? allocate_as_full_set : request_set; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire _new_needT_T = new_request_opcode[2]; // @[Parameters.scala:269:12]
wire _new_needT_T_1 = ~_new_needT_T; // @[Parameters.scala:269:{5,12}]
wire _GEN_17 = new_request_opcode == 3'h5; // @[Parameters.scala:270:13]
wire _new_needT_T_2; // @[Parameters.scala:270:13]
assign _new_needT_T_2 = _GEN_17; // @[Parameters.scala:270:13]
wire _new_skipProbe_T_5; // @[Parameters.scala:279:117]
assign _new_skipProbe_T_5 = _GEN_17; // @[Parameters.scala:270:13, :279:117]
wire _new_needT_T_3 = new_request_param == 3'h1; // @[Parameters.scala:270:42]
wire _new_needT_T_4 = _new_needT_T_2 & _new_needT_T_3; // @[Parameters.scala:270:{13,33,42}]
wire _new_needT_T_5 = _new_needT_T_1 | _new_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33]
wire _T_615 = new_request_opcode == 3'h6; // @[Parameters.scala:271:14]
wire _new_needT_T_6; // @[Parameters.scala:271:14]
assign _new_needT_T_6 = _T_615; // @[Parameters.scala:271:14]
wire _new_skipProbe_T; // @[Parameters.scala:279:12]
assign _new_skipProbe_T = _T_615; // @[Parameters.scala:271:14, :279:12]
wire _new_needT_T_7 = &new_request_opcode; // @[Parameters.scala:271:52]
wire _new_needT_T_8 = _new_needT_T_6 | _new_needT_T_7; // @[Parameters.scala:271:{14,42,52}]
wire _new_needT_T_9 = |new_request_param; // @[Parameters.scala:271:89]
wire _new_needT_T_10 = _new_needT_T_8 & _new_needT_T_9; // @[Parameters.scala:271:{42,80,89}]
wire new_needT = _new_needT_T_5 | _new_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80]
wire new_clientBit = new_request_source == 6'h28; // @[Parameters.scala:46:9]
wire _new_skipProbe_T_1 = &new_request_opcode; // @[Parameters.scala:271:52, :279:50]
wire _new_skipProbe_T_2 = _new_skipProbe_T | _new_skipProbe_T_1; // @[Parameters.scala:279:{12,40,50}]
wire _new_skipProbe_T_3 = new_request_opcode == 3'h4; // @[Parameters.scala:279:87]
wire _new_skipProbe_T_4 = _new_skipProbe_T_2 | _new_skipProbe_T_3; // @[Parameters.scala:279:{40,77,87}]
wire _new_skipProbe_T_7 = _new_skipProbe_T_4; // @[Parameters.scala:279:{77,106}]
wire new_skipProbe = _new_skipProbe_T_7 & new_clientBit; // @[Parameters.scala:46:9]
wire [3:0] prior; // @[MSHR.scala:314:26]
wire _prior_out_T = ~prior_c; // @[MSHR.scala:315:27, :318:32]
wire _prior_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26]
wire [2:0] _prior_out_T_4 = prior_c ? _prior_out_T_2 : _prior_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
assign prior = _prior_T ? {3'h0, _prior_out_T} : _prior_T_1 ? {2'h0, _prior_out_T_1} : _prior_T_2 ? {1'h0, _prior_out_T_4} : {_prior_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26]
wire _T_574 = io_directory_valid_0 | _new_meta_T; // @[MSHR.scala:84:7, :505:40, :539:28]
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
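// out_d = in_a * in_b + in_c, per the ArithmeticOps.mac contract (m1 * m2 + self)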
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
 * @param inputType data type of the A operand fed to the MAC multiplier
 * @param outputType data type of the B and D inputs and of the B/C outputs passed between PEs
 * @param accType accumulator type (used for the c1/c2 registers unless the PE is WS-only); its width also sizes the shift control field
 * @param df which dataflow(s) this PE supports (OS, WS, or BOTH)
 * @param max_simultaneous_matmuls maximum number of in-flight matmuls; sizes the id field
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
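// last_s holds the propagate bit from the previous valid cycle; when it differs
// from the current one (flip), a new propagate phase has begun, and only then is
// the rounding shift applied to the accumulator value driven onto out_c.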
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
io.bad_dataflow := false.B
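// In the output-stationary dataflow, c1 and c2 are double-buffered: one register
// accumulates the running dot product through the MAC unit while the other holds
// the finished result (shifted and clipped onto out_c) and is preloaded from d for
// the next matmul; the roles swap whenever the propagate bit toggles.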
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds away from 0
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
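// Illustrative sketch (not part of the original sources): the shape of a user-supplied instance,
// as suggested by the comment at the top of this file. "MyType" is a placeholder; a real instance
// must override every operation declared in ArithmeticOps above (*, mac, +, -, >>, >, identity,
// withWidthOf, clippedToWidthOf, relu, zero, minimum). See UIntArithmetic, SIntArithmetic, and
// DummySIntArithmetic below for complete implementations.
//
//   implicit object MyTypeArithmetic extends Arithmetic[MyType] {
//     override implicit def cast(self: MyType) = new ArithmeticOps(self) {
//       override def mac(m1: MyType, m2: MyType) = /* m1 * m2 + self */ ...
//       // ... remaining ArithmeticOps overrides ...
//     }
//   }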
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
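// Worked example of the round-to-nearest, ties-to-even behavior implemented below:
// self = 11 (0b1011), u = 2: point_five = bit1 = 1, zeros = (bit0 != 0) = 1, ones_digit = bit2 = 0,
// so r = 1 and the result is (11 >> 2) + 1 = 3 (11/4 = 2.75 rounds up).
// self = 10 (0b1010), u = 2: point_five = 1, zeros = 0, ones_digit = 0, so r = 0 and the
// result is 2 (10/4 = 2.5 is a tie and rounds to the even value).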
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
// Instantiate the hardfloat sqrt
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
// Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
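// (MulAddRecFN computes a*b + c, so the sum is formed as t_rec_resized * 1.0 + self_rec)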
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
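// A float with a zero significand field encodes 2^(exponent_field - bias), so writing
// (bias - u) into the exponent field yields exactly 2^(-u).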
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module MacUnit_173( // @[PE.scala:14:7]
input clock, // @[PE.scala:14:7]
input reset, // @[PE.scala:14:7]
input [7:0] io_in_a, // @[PE.scala:16:14]
input [7:0] io_in_b, // @[PE.scala:16:14]
input [19:0] io_in_c, // @[PE.scala:16:14]
output [19:0] io_out_d // @[PE.scala:16:14]
);
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:14:7]
wire [7:0] io_in_b_0 = io_in_b; // @[PE.scala:14:7]
wire [19:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7]
wire [19:0] _io_out_d_T_3; // @[Arithmetic.scala:93:54]
wire [19:0] io_out_d_0; // @[PE.scala:14:7]
wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7]
wire [20:0] _io_out_d_T_1 = {{5{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[19], io_in_c_0}; // @[PE.scala:14:7]
wire [19:0] _io_out_d_T_2 = _io_out_d_T_1[19:0]; // @[Arithmetic.scala:93:54]
assign _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54]
assign io_out_d_0 = _io_out_d_T_3; // @[PE.scala:14:7]
assign io_out_d = io_out_d_0; // @[PE.scala:14:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
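/** Usage sketch (illustrative, not part of this file): constellation's Router, later in this
  * document, reads a sampling-rate plusarg with
  *   val sample_rate = PlusArg("noc_util_sample_rate", width=20)
  * and the simulation is then run with +noc_util_sample_rate=1000 to enable sampling.
  */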
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File Nodes.scala:
package constellation.channel
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Parameters, Field}
import freechips.rocketchip.diplomacy._
case class EmptyParams()
case class ChannelEdgeParams(cp: ChannelParams, p: Parameters)
object ChannelImp extends SimpleNodeImp[EmptyParams, ChannelParams, ChannelEdgeParams, Channel] {
def edge(pd: EmptyParams, pu: ChannelParams, p: Parameters, sourceInfo: SourceInfo) = {
ChannelEdgeParams(pu, p)
}
def bundle(e: ChannelEdgeParams) = new Channel(e.cp)(e.p)
def render(e: ChannelEdgeParams) = if (e.cp.possibleFlows.size == 0) {
RenderedEdge(colour = "ffffff", label = "X")
} else {
RenderedEdge(colour = "#0000ff", label = e.cp.payloadBits.toString)
}
override def monitor(bundle: Channel, edge: ChannelEdgeParams): Unit = {
val monitor = Module(new NoCMonitor(edge.cp)(edge.p))
monitor.io.in := bundle
}
// TODO: Add nodepath stuff? override def mixO, override def mixI
}
case class ChannelSourceNode(val destId: Int)(implicit valName: ValName) extends SourceNode(ChannelImp)(Seq(EmptyParams()))
case class ChannelDestNode(val destParams: ChannelParams)(implicit valName: ValName) extends SinkNode(ChannelImp)(Seq(destParams))
case class ChannelAdapterNode(
slaveFn: ChannelParams => ChannelParams = { d => d })(
implicit valName: ValName) extends AdapterNode(ChannelImp)((e: EmptyParams) => e, slaveFn)
case class ChannelIdentityNode()(implicit valName: ValName) extends IdentityNode(ChannelImp)()
case class ChannelEphemeralNode()(implicit valName: ValName) extends EphemeralNode(ChannelImp)()
case class IngressChannelEdgeParams(cp: IngressChannelParams, p: Parameters)
case class EgressChannelEdgeParams(cp: EgressChannelParams, p: Parameters)
object IngressChannelImp extends SimpleNodeImp[EmptyParams, IngressChannelParams, IngressChannelEdgeParams, IngressChannel] {
def edge(pd: EmptyParams, pu: IngressChannelParams, p: Parameters, sourceInfo: SourceInfo) = {
IngressChannelEdgeParams(pu, p)
}
def bundle(e: IngressChannelEdgeParams) = new IngressChannel(e.cp)(e.p)
def render(e: IngressChannelEdgeParams) = if (e.cp.possibleFlows.size == 0) {
RenderedEdge(colour = "ffffff", label = "X")
} else {
RenderedEdge(colour = "#00ff00", label = e.cp.payloadBits.toString)
}
}
object EgressChannelImp extends SimpleNodeImp[EmptyParams, EgressChannelParams, EgressChannelEdgeParams, EgressChannel] {
def edge(pd: EmptyParams, pu: EgressChannelParams, p: Parameters, sourceInfo: SourceInfo) = {
EgressChannelEdgeParams(pu, p)
}
def bundle(e: EgressChannelEdgeParams) = new EgressChannel(e.cp)(e.p)
def render(e: EgressChannelEdgeParams) = if (e.cp.possibleFlows.size == 0) {
RenderedEdge(colour = "ffffff", label = "X")
} else {
RenderedEdge(colour = "#ff0000", label = e.cp.payloadBits.toString)
}
}
case class IngressChannelSourceNode(val destId: Int)(implicit valName: ValName) extends SourceNode(IngressChannelImp)(Seq(EmptyParams()))
case class IngressChannelDestNode(val destParams: IngressChannelParams)(implicit valName: ValName) extends SinkNode(IngressChannelImp)(Seq(destParams))
case class EgressChannelSourceNode(val egressId: Int)(implicit valName: ValName) extends SourceNode(EgressChannelImp)(Seq(EmptyParams()))
case class EgressChannelDestNode(val destParams: EgressChannelParams)(implicit valName: ValName) extends SinkNode(EgressChannelImp)(Seq(destParams))
case class IngressChannelAdapterNode(
slaveFn: IngressChannelParams => IngressChannelParams = { d => d })(
implicit valName: ValName) extends AdapterNode(IngressChannelImp)(m => m, slaveFn)
case class EgressChannelAdapterNode(
slaveFn: EgressChannelParams => EgressChannelParams = { d => d })(
implicit valName: ValName) extends AdapterNode(EgressChannelImp)(m => m, slaveFn)
case class IngressChannelIdentityNode()(implicit valName: ValName) extends IdentityNode(IngressChannelImp)()
case class EgressChannelIdentityNode()(implicit valName: ValName) extends IdentityNode(EgressChannelImp)()
case class IngressChannelEphemeralNode()(implicit valName: ValName) extends EphemeralNode(IngressChannelImp)()
case class EgressChannelEphemeralNode()(implicit valName: ValName) extends EphemeralNode(EgressChannelImp)()
File Router.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{RoutingRelation}
import constellation.noc.{HasNoCParams}
case class UserRouterParams(
// Payload width. Must match payload width on all channels attached to this routing node
payloadBits: Int = 64,
// Combines SA and ST stages (removes pipeline register)
combineSAST: Boolean = false,
// Combines RC and VA stages (removes pipeline register)
combineRCVA: Boolean = false,
// Adds combinational path from SA to VA
coupleSAVA: Boolean = false,
vcAllocator: VCAllocatorParams => Parameters => VCAllocator = (vP) => (p) => new RotatingSingleVCAllocator(vP)(p)
)
case class RouterParams(
nodeId: Int,
nIngress: Int,
nEgress: Int,
user: UserRouterParams
)
trait HasRouterOutputParams {
def outParams: Seq[ChannelParams]
def egressParams: Seq[EgressChannelParams]
def allOutParams = outParams ++ egressParams
def nOutputs = outParams.size
def nEgress = egressParams.size
def nAllOutputs = allOutParams.size
}
trait HasRouterInputParams {
def inParams: Seq[ChannelParams]
def ingressParams: Seq[IngressChannelParams]
def allInParams = inParams ++ ingressParams
def nInputs = inParams.size
def nIngress = ingressParams.size
def nAllInputs = allInParams.size
}
trait HasRouterParams
{
def routerParams: RouterParams
def nodeId = routerParams.nodeId
def payloadBits = routerParams.user.payloadBits
}
class DebugBundle(val nIn: Int) extends Bundle {
val va_stall = Vec(nIn, UInt())
val sa_stall = Vec(nIn, UInt())
}
class Router(
val routerParams: RouterParams,
preDiplomaticInParams: Seq[ChannelParams],
preDiplomaticIngressParams: Seq[IngressChannelParams],
outDests: Seq[Int],
egressIds: Seq[Int]
)(implicit p: Parameters) extends LazyModule with HasNoCParams with HasRouterParams {
val allPreDiplomaticInParams = preDiplomaticInParams ++ preDiplomaticIngressParams
val destNodes = preDiplomaticInParams.map(u => ChannelDestNode(u))
val sourceNodes = outDests.map(u => ChannelSourceNode(u))
val ingressNodes = preDiplomaticIngressParams.map(u => IngressChannelDestNode(u))
val egressNodes = egressIds.map(u => EgressChannelSourceNode(u))
val debugNode = BundleBridgeSource(() => new DebugBundle(allPreDiplomaticInParams.size))
val ctrlNode = if (hasCtrl) Some(BundleBridgeSource(() => new RouterCtrlBundle)) else None
def inParams = module.inParams
def outParams = module.outParams
def ingressParams = module.ingressParams
def egressParams = module.egressParams
lazy val module = new LazyModuleImp(this) with HasRouterInputParams with HasRouterOutputParams {
val (io_in, edgesIn) = destNodes.map(_.in(0)).unzip
val (io_out, edgesOut) = sourceNodes.map(_.out(0)).unzip
val (io_ingress, edgesIngress) = ingressNodes.map(_.in(0)).unzip
val (io_egress, edgesEgress) = egressNodes.map(_.out(0)).unzip
val io_debug = debugNode.out(0)._1
val inParams = edgesIn.map(_.cp)
val outParams = edgesOut.map(_.cp)
val ingressParams = edgesIngress.map(_.cp)
val egressParams = edgesEgress.map(_.cp)
allOutParams.foreach(u => require(u.srcId == nodeId && u.payloadBits == routerParams.user.payloadBits))
allInParams.foreach(u => require(u.destId == nodeId && u.payloadBits == routerParams.user.payloadBits))
require(nIngress == routerParams.nIngress)
require(nEgress == routerParams.nEgress)
require(nAllInputs >= 1)
require(nAllOutputs >= 1)
require(nodeId < (1 << nodeIdBits))
val input_units = inParams.zipWithIndex.map { case (u,i) =>
Module(new InputUnit(u, outParams, egressParams,
routerParams.user.combineRCVA, routerParams.user.combineSAST))
.suggestName(s"input_unit_${i}_from_${u.srcId}") }
val ingress_units = ingressParams.zipWithIndex.map { case (u,i) =>
Module(new IngressUnit(i, u, outParams, egressParams,
routerParams.user.combineRCVA, routerParams.user.combineSAST))
.suggestName(s"ingress_unit_${i+nInputs}_from_${u.ingressId}") }
val all_input_units = input_units ++ ingress_units
val output_units = outParams.zipWithIndex.map { case (u,i) =>
Module(new OutputUnit(inParams, ingressParams, u))
.suggestName(s"output_unit_${i}_to_${u.destId}")}
val egress_units = egressParams.zipWithIndex.map { case (u,i) =>
Module(new EgressUnit(routerParams.user.coupleSAVA && all_input_units.size == 1,
routerParams.user.combineSAST,
inParams, ingressParams, u))
.suggestName(s"egress_unit_${i+nOutputs}_to_${u.egressId}")}
val all_output_units = output_units ++ egress_units
val switch = Module(new Switch(routerParams, inParams, outParams, ingressParams, egressParams))
val switch_allocator = Module(new SwitchAllocator(routerParams, inParams, outParams, ingressParams, egressParams))
val vc_allocator = Module(routerParams.user.vcAllocator(
VCAllocatorParams(routerParams, inParams, outParams, ingressParams, egressParams)
)(p))
val route_computer = Module(new RouteComputer(routerParams, inParams, outParams, ingressParams, egressParams))
val fires_count = WireInit(PopCount(vc_allocator.io.req.map(_.fire)))
dontTouch(fires_count)
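// Wire the per-input units through the router pipeline: route computation (RC), VC allocation
// (VA), switch allocation (SA), and the crossbar switch (ST). Output-side units feed credit and
// channel-status information back to the allocators and input units.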
(io_in zip input_units ).foreach { case (i,u) => u.io.in <> i }
(io_ingress zip ingress_units).foreach { case (i,u) => u.io.in <> i.flit }
(output_units zip io_out ).foreach { case (u,o) => o <> u.io.out }
(egress_units zip io_egress).foreach { case (u,o) => o.flit <> u.io.out }
(route_computer.io.req zip all_input_units).foreach {
case (i,u) => i <> u.io.router_req }
(all_input_units zip route_computer.io.resp).foreach {
case (u,o) => u.io.router_resp <> o }
(vc_allocator.io.req zip all_input_units).foreach {
case (i,u) => i <> u.io.vcalloc_req }
(all_input_units zip vc_allocator.io.resp).foreach {
case (u,o) => u.io.vcalloc_resp <> o }
(all_output_units zip vc_allocator.io.out_allocs).foreach {
case (u,a) => u.io.allocs <> a }
(vc_allocator.io.channel_status zip all_output_units).foreach {
case (a,u) => a := u.io.channel_status }
all_input_units.foreach(in => all_output_units.zipWithIndex.foreach { case (out,outIdx) =>
in.io.out_credit_available(outIdx) := out.io.credit_available
})
(all_input_units zip switch_allocator.io.req).foreach {
case (u,r) => r <> u.io.salloc_req }
(all_output_units zip switch_allocator.io.credit_alloc).foreach {
case (u,a) => u.io.credit_alloc := a }
(switch.io.in zip all_input_units).foreach {
case (i,u) => i <> u.io.out }
(all_output_units zip switch.io.out).foreach {
case (u,o) => u.io.in <> o }
switch.io.sel := (if (routerParams.user.combineSAST) {
switch_allocator.io.switch_sel
} else {
RegNext(switch_allocator.io.switch_sel)
})
if (hasCtrl) {
val io_ctrl = ctrlNode.get.out(0)._1
val ctrl = Module(new RouterControlUnit(routerParams, inParams, outParams, ingressParams, egressParams))
io_ctrl <> ctrl.io.ctrl
(all_input_units zip ctrl.io.in_block ).foreach { case (l,r) => l.io.block := r }
(all_input_units zip ctrl.io.in_fire ).foreach { case (l,r) => r := l.io.out.map(_.valid) }
} else {
input_units.foreach(_.io.block := false.B)
ingress_units.foreach(_.io.block := false.B)
}
(io_debug.va_stall zip all_input_units.map(_.io.debug.va_stall)).map { case (l,r) => l := r }
(io_debug.sa_stall zip all_input_units.map(_.io.debug.sa_stall)).map { case (l,r) => l := r }
val debug_tsc = RegInit(0.U(64.W))
debug_tsc := debug_tsc + 1.U
val debug_sample = RegInit(0.U(64.W))
debug_sample := debug_sample + 1.U
val sample_rate = PlusArg("noc_util_sample_rate", width=20)
when (debug_sample === sample_rate - 1.U) { debug_sample := 0.U }
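// Per-channel utilization sampling: when +noc_util_sample_rate is nonzero, each channel that has
// ever fired prints a "nocsample <timestamp> <label> <flit count>" line once per sampling window.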
def sample(fire: Bool, s: String) = {
val util_ctr = RegInit(0.U(64.W))
val fired = RegInit(false.B)
util_ctr := util_ctr + fire
fired := fired || fire
when (sample_rate =/= 0.U && debug_sample === sample_rate - 1.U && fired) {
val fmtStr = s"nocsample %d $s %d\n"
printf(fmtStr, debug_tsc, util_ctr);
fired := fire
}
}
destNodes.map(_.in(0)).foreach { case (in, edge) => in.flit.map { f =>
sample(f.fire, s"${edge.cp.srcId} $nodeId")
} }
ingressNodes.map(_.in(0)).foreach { case (in, edge) =>
sample(in.flit.fire, s"i${edge.cp.asInstanceOf[IngressChannelParams].ingressId} $nodeId")
}
egressNodes.map(_.out(0)).foreach { case (out, edge) =>
sample(out.flit.fire, s"$nodeId e${edge.cp.asInstanceOf[EgressChannelParams].egressId}")
}
}
}
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
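// Example (illustrative sketch, not from this file): a subclass would typically override the
// defaults below with a real clock/reset source, e.g.
//   childClock := someClockBundle.clock
//   childReset := someClockBundle.reset
// where `someClockBundle` stands in for whatever clock sink the wrapper negotiates diplomatically.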
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
| module Router_1( // @[Router.scala:89:25]
input clock, // @[Router.scala:89:25]
input reset, // @[Router.scala:89:25]
output [4:0] auto_debug_out_va_stall_0, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_debug_out_va_stall_1, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_debug_out_va_stall_4, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_debug_out_va_stall_5, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_debug_out_va_stall_6, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_debug_out_va_stall_7, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_debug_out_va_stall_8, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_debug_out_sa_stall_0, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_debug_out_sa_stall_1, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_debug_out_sa_stall_4, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_debug_out_sa_stall_5, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_debug_out_sa_stall_6, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_debug_out_sa_stall_7, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_debug_out_sa_stall_8, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_6_flit_valid, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_6_flit_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_6_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
output [72:0] auto_egress_nodes_out_6_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input auto_egress_nodes_out_5_flit_ready, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_5_flit_valid, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_5_flit_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_5_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
output [72:0] auto_egress_nodes_out_5_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input auto_egress_nodes_out_4_flit_ready, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_4_flit_valid, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_4_flit_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_4_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
output [72:0] auto_egress_nodes_out_4_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input auto_egress_nodes_out_3_flit_ready, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_3_flit_valid, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_3_flit_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_3_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
output [72:0] auto_egress_nodes_out_3_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input auto_egress_nodes_out_2_flit_ready, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_2_flit_valid, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_2_flit_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_2_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
output [72:0] auto_egress_nodes_out_2_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input auto_egress_nodes_out_1_flit_ready, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_1_flit_valid, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_1_flit_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_1_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
output [72:0] auto_egress_nodes_out_1_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input auto_egress_nodes_out_0_flit_ready, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_0_flit_valid, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_0_flit_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_0_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
output auto_ingress_nodes_in_7_flit_ready, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_7_flit_valid, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_7_flit_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_7_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
input [72:0] auto_ingress_nodes_in_7_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_ingress_nodes_in_7_flit_bits_egress_id, // @[LazyModuleImp.scala:107:25]
output auto_ingress_nodes_in_6_flit_ready, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_6_flit_valid, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_6_flit_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_6_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
input [72:0] auto_ingress_nodes_in_6_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_ingress_nodes_in_6_flit_bits_egress_id, // @[LazyModuleImp.scala:107:25]
output auto_ingress_nodes_in_5_flit_ready, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_5_flit_valid, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_5_flit_bits_head, // @[LazyModuleImp.scala:107:25]
input [72:0] auto_ingress_nodes_in_5_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_ingress_nodes_in_5_flit_bits_egress_id, // @[LazyModuleImp.scala:107:25]
output auto_ingress_nodes_in_4_flit_ready, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_4_flit_valid, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_4_flit_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_4_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
input [72:0] auto_ingress_nodes_in_4_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_ingress_nodes_in_4_flit_bits_egress_id, // @[LazyModuleImp.scala:107:25]
output auto_ingress_nodes_in_3_flit_ready, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_3_flit_valid, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_3_flit_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_3_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
input [72:0] auto_ingress_nodes_in_3_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_ingress_nodes_in_3_flit_bits_egress_id, // @[LazyModuleImp.scala:107:25]
output auto_ingress_nodes_in_0_flit_ready, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_0_flit_valid, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_0_flit_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_0_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
input [72:0] auto_ingress_nodes_in_0_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_ingress_nodes_in_0_flit_bits_egress_id, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_flit_0_valid, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
output [72:0] auto_source_nodes_out_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_source_nodes_out_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_source_nodes_out_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_source_nodes_out_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_source_nodes_out_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_source_nodes_out_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
input [21:0] auto_source_nodes_out_credit_return, // @[LazyModuleImp.scala:107:25]
input [21:0] auto_source_nodes_out_vc_free, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_flit_0_valid, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
input [72:0] auto_dest_nodes_in_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_dest_nodes_in_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_dest_nodes_in_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_dest_nodes_in_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_dest_nodes_in_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_dest_nodes_in_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
output [21:0] auto_dest_nodes_in_credit_return, // @[LazyModuleImp.scala:107:25]
output [21:0] auto_dest_nodes_in_vc_free // @[LazyModuleImp.scala:107:25]
);
wire [19:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire _vc_allocator_io_req_8_ready; // @[Router.scala:133:30]
wire _vc_allocator_io_req_7_ready; // @[Router.scala:133:30]
wire _vc_allocator_io_req_6_ready; // @[Router.scala:133:30]
wire _vc_allocator_io_req_5_ready; // @[Router.scala:133:30]
wire _vc_allocator_io_req_4_ready; // @[Router.scala:133:30]
wire _vc_allocator_io_req_1_ready; // @[Router.scala:133:30]
wire _vc_allocator_io_req_0_ready; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_8_vc_sel_7_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_8_vc_sel_6_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_8_vc_sel_5_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_8_vc_sel_4_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_8_vc_sel_3_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_8_vc_sel_2_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_8_vc_sel_1_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_8_vc_sel_0_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_8_vc_sel_0_1; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_8_vc_sel_0_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_8_vc_sel_0_3; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_8_vc_sel_0_4; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_8_vc_sel_0_5; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_8_vc_sel_0_6; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_8_vc_sel_0_7; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_8_vc_sel_0_8; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_8_vc_sel_0_9; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_8_vc_sel_0_10; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_8_vc_sel_0_11; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_8_vc_sel_0_12; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_8_vc_sel_0_13; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_8_vc_sel_0_14; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_8_vc_sel_0_15; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_8_vc_sel_0_16; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_8_vc_sel_0_17; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_8_vc_sel_0_18; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_8_vc_sel_0_19; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_8_vc_sel_0_20; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_8_vc_sel_0_21; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_7_vc_sel_7_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_7_vc_sel_6_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_7_vc_sel_5_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_7_vc_sel_4_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_7_vc_sel_3_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_7_vc_sel_2_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_7_vc_sel_1_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_7_vc_sel_0_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_7_vc_sel_0_1; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_7_vc_sel_0_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_7_vc_sel_0_3; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_7_vc_sel_0_4; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_7_vc_sel_0_5; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_7_vc_sel_0_6; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_7_vc_sel_0_7; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_7_vc_sel_0_8; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_7_vc_sel_0_9; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_7_vc_sel_0_10; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_7_vc_sel_0_11; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_7_vc_sel_0_12; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_7_vc_sel_0_13; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_7_vc_sel_0_14; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_7_vc_sel_0_15; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_7_vc_sel_0_16; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_7_vc_sel_0_17; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_7_vc_sel_0_18; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_7_vc_sel_0_19; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_7_vc_sel_0_20; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_7_vc_sel_0_21; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_6_vc_sel_7_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_6_vc_sel_6_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_6_vc_sel_5_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_6_vc_sel_4_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_6_vc_sel_3_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_6_vc_sel_2_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_6_vc_sel_1_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_6_vc_sel_0_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_6_vc_sel_0_1; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_6_vc_sel_0_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_6_vc_sel_0_3; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_6_vc_sel_0_4; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_6_vc_sel_0_5; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_6_vc_sel_0_6; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_6_vc_sel_0_7; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_6_vc_sel_0_8; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_6_vc_sel_0_9; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_6_vc_sel_0_10; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_6_vc_sel_0_11; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_6_vc_sel_0_12; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_6_vc_sel_0_13; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_6_vc_sel_0_14; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_6_vc_sel_0_15; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_6_vc_sel_0_16; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_6_vc_sel_0_17; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_6_vc_sel_0_18; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_6_vc_sel_0_19; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_6_vc_sel_0_20; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_6_vc_sel_0_21; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_5_vc_sel_7_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_5_vc_sel_6_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_5_vc_sel_5_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_5_vc_sel_4_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_5_vc_sel_3_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_5_vc_sel_2_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_5_vc_sel_1_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_5_vc_sel_0_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_5_vc_sel_0_1; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_5_vc_sel_0_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_5_vc_sel_0_3; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_5_vc_sel_0_4; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_5_vc_sel_0_5; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_5_vc_sel_0_6; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_5_vc_sel_0_7; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_5_vc_sel_0_8; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_5_vc_sel_0_9; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_5_vc_sel_0_10; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_5_vc_sel_0_11; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_5_vc_sel_0_12; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_5_vc_sel_0_13; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_5_vc_sel_0_14; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_5_vc_sel_0_15; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_5_vc_sel_0_16; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_5_vc_sel_0_17; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_5_vc_sel_0_18; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_5_vc_sel_0_19; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_5_vc_sel_0_20; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_5_vc_sel_0_21; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_7_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_6_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_5_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_4_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_3_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_2_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_1_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_0_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_0_1; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_0_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_0_3; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_0_4; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_0_5; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_0_6; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_0_7; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_0_8; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_0_9; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_0_10; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_0_11; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_0_12; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_0_13; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_0_14; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_0_15; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_0_16; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_0_17; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_0_18; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_0_19; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_0_20; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_0_21; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_7_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_6_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_5_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_4_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_3_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_2_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_1_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_1; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_3; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_4; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_5; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_6; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_7; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_8; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_9; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_10; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_11; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_12; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_13; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_14; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_15; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_16; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_17; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_18; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_19; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_20; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_21; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_0_vc_sel_7_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_0_vc_sel_6_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_0_vc_sel_5_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_0_vc_sel_4_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_0_vc_sel_3_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_0_vc_sel_2_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_0_vc_sel_1_0; // @[Router.scala:133:30]
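  // Per-output-VC allocation flags driven by the VC allocator instance. // @[Router.scala:133:30]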
wire _vc_allocator_io_out_allocs_7_0_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_6_0_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_5_0_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_4_0_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_3_0_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_2_0_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_1_0_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_8_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_9_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_10_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_11_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_12_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_13_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_14_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_15_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_16_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_17_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_18_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_19_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_20_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_21_alloc; // @[Router.scala:133:30]
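  // Request-ready, credit-allocation, and switch-select wires driven by the switch allocator instance. // @[Router.scala:132:34]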
wire _switch_allocator_io_req_8_0_ready; // @[Router.scala:132:34]
wire _switch_allocator_io_req_7_0_ready; // @[Router.scala:132:34]
wire _switch_allocator_io_req_6_0_ready; // @[Router.scala:132:34]
wire _switch_allocator_io_req_5_0_ready; // @[Router.scala:132:34]
wire _switch_allocator_io_req_4_0_ready; // @[Router.scala:132:34]
wire _switch_allocator_io_req_1_0_ready; // @[Router.scala:132:34]
wire _switch_allocator_io_req_0_0_ready; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_7_0_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_7_0_tail; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_6_0_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_6_0_tail; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_5_0_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_5_0_tail; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_4_0_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_4_0_tail; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_3_0_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_3_0_tail; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_2_0_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_2_0_tail; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_1_0_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_1_0_tail; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_8_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_9_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_10_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_11_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_12_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_13_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_14_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_15_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_16_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_17_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_18_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_19_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_20_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_21_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_7_0_8_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_7_0_7_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_7_0_6_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_7_0_5_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_7_0_4_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_7_0_1_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_7_0_0_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_6_0_8_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_6_0_7_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_6_0_6_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_6_0_5_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_6_0_4_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_6_0_1_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_6_0_0_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_5_0_8_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_5_0_7_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_5_0_6_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_5_0_5_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_5_0_4_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_5_0_1_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_5_0_0_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_4_0_8_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_4_0_7_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_4_0_6_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_4_0_5_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_4_0_4_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_4_0_1_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_4_0_0_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_3_0_8_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_3_0_7_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_3_0_6_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_3_0_5_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_3_0_4_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_3_0_1_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_3_0_0_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_2_0_8_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_2_0_7_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_2_0_6_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_2_0_5_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_2_0_4_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_2_0_1_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_2_0_0_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_1_0_8_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_1_0_7_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_1_0_6_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_1_0_5_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_1_0_4_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_1_0_1_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_1_0_0_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_0_0_8_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_0_0_7_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_0_0_6_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_0_0_5_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_0_0_4_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_0_0_1_0; // @[Router.scala:132:34]
wire _switch_io_out_7_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_7_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_7_0_bits_tail; // @[Router.scala:131:24]
wire [72:0] _switch_io_out_7_0_bits_payload; // @[Router.scala:131:24]
wire [5:0] _switch_io_out_7_0_bits_flow_ingress_node; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_7_0_bits_flow_ingress_node_id; // @[Router.scala:131:24]
wire _switch_io_out_6_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_6_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_6_0_bits_tail; // @[Router.scala:131:24]
wire [72:0] _switch_io_out_6_0_bits_payload; // @[Router.scala:131:24]
wire [5:0] _switch_io_out_6_0_bits_flow_ingress_node; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_6_0_bits_flow_ingress_node_id; // @[Router.scala:131:24]
wire _switch_io_out_5_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_5_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_5_0_bits_tail; // @[Router.scala:131:24]
wire [72:0] _switch_io_out_5_0_bits_payload; // @[Router.scala:131:24]
wire [5:0] _switch_io_out_5_0_bits_flow_ingress_node; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_5_0_bits_flow_ingress_node_id; // @[Router.scala:131:24]
wire _switch_io_out_4_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_4_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_4_0_bits_tail; // @[Router.scala:131:24]
wire [72:0] _switch_io_out_4_0_bits_payload; // @[Router.scala:131:24]
wire [5:0] _switch_io_out_4_0_bits_flow_ingress_node; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_4_0_bits_flow_ingress_node_id; // @[Router.scala:131:24]
wire _switch_io_out_3_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_3_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_3_0_bits_tail; // @[Router.scala:131:24]
wire [72:0] _switch_io_out_3_0_bits_payload; // @[Router.scala:131:24]
wire [5:0] _switch_io_out_3_0_bits_flow_ingress_node; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_3_0_bits_flow_ingress_node_id; // @[Router.scala:131:24]
wire _switch_io_out_2_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_2_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_2_0_bits_tail; // @[Router.scala:131:24]
wire [72:0] _switch_io_out_2_0_bits_payload; // @[Router.scala:131:24]
wire [5:0] _switch_io_out_2_0_bits_flow_ingress_node; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_2_0_bits_flow_ingress_node_id; // @[Router.scala:131:24]
wire _switch_io_out_1_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_1_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_1_0_bits_tail; // @[Router.scala:131:24]
wire [72:0] _switch_io_out_1_0_bits_payload; // @[Router.scala:131:24]
wire _switch_io_out_0_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_0_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_0_0_bits_tail; // @[Router.scala:131:24]
wire [72:0] _switch_io_out_0_0_bits_payload; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_0_0_bits_flow_vnet_id; // @[Router.scala:131:24]
wire [5:0] _switch_io_out_0_0_bits_flow_ingress_node; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_0_0_bits_flow_ingress_node_id; // @[Router.scala:131:24]
wire [5:0] _switch_io_out_0_0_bits_flow_egress_node; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_0_0_bits_flow_egress_node_id; // @[Router.scala:131:24]
wire [4:0] _switch_io_out_0_0_bits_virt_channel_id; // @[Router.scala:131:24]
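  // Credit-availability, channel-occupancy, and output-valid wires from the egress unit instances. // @[Router.scala:125:13]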
wire _egress_unit_7_to_43_io_credit_available_0; // @[Router.scala:125:13]
wire _egress_unit_7_to_43_io_channel_status_0_occupied; // @[Router.scala:125:13]
wire _egress_unit_7_to_43_io_out_valid; // @[Router.scala:125:13]
wire _egress_unit_6_to_42_io_credit_available_0; // @[Router.scala:125:13]
wire _egress_unit_6_to_42_io_channel_status_0_occupied; // @[Router.scala:125:13]
wire _egress_unit_6_to_42_io_out_valid; // @[Router.scala:125:13]
wire _egress_unit_5_to_41_io_credit_available_0; // @[Router.scala:125:13]
wire _egress_unit_5_to_41_io_channel_status_0_occupied; // @[Router.scala:125:13]
wire _egress_unit_5_to_41_io_out_valid; // @[Router.scala:125:13]
wire _egress_unit_4_to_20_io_credit_available_0; // @[Router.scala:125:13]
wire _egress_unit_4_to_20_io_channel_status_0_occupied; // @[Router.scala:125:13]
wire _egress_unit_4_to_20_io_out_valid; // @[Router.scala:125:13]
wire _egress_unit_3_to_19_io_credit_available_0; // @[Router.scala:125:13]
wire _egress_unit_3_to_19_io_channel_status_0_occupied; // @[Router.scala:125:13]
wire _egress_unit_3_to_19_io_out_valid; // @[Router.scala:125:13]
wire _egress_unit_2_to_3_io_credit_available_0; // @[Router.scala:125:13]
wire _egress_unit_2_to_3_io_channel_status_0_occupied; // @[Router.scala:125:13]
wire _egress_unit_2_to_3_io_out_valid; // @[Router.scala:125:13]
wire _egress_unit_1_to_2_io_credit_available_0; // @[Router.scala:125:13]
wire _egress_unit_1_to_2_io_channel_status_0_occupied; // @[Router.scala:125:13]
wire _egress_unit_1_to_2_io_out_valid; // @[Router.scala:125:13]
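  // Credit-availability and channel-status wires from the output unit instance output_unit_0_to_22. // @[Router.scala:122:13]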
wire _output_unit_0_to_22_io_credit_available_8; // @[Router.scala:122:13]
wire _output_unit_0_to_22_io_credit_available_9; // @[Router.scala:122:13]
wire _output_unit_0_to_22_io_credit_available_10; // @[Router.scala:122:13]
wire _output_unit_0_to_22_io_credit_available_11; // @[Router.scala:122:13]
wire _output_unit_0_to_22_io_credit_available_12; // @[Router.scala:122:13]
wire _output_unit_0_to_22_io_credit_available_13; // @[Router.scala:122:13]
wire _output_unit_0_to_22_io_credit_available_14; // @[Router.scala:122:13]
wire _output_unit_0_to_22_io_credit_available_15; // @[Router.scala:122:13]
wire _output_unit_0_to_22_io_credit_available_16; // @[Router.scala:122:13]
wire _output_unit_0_to_22_io_credit_available_17; // @[Router.scala:122:13]
wire _output_unit_0_to_22_io_credit_available_18; // @[Router.scala:122:13]
wire _output_unit_0_to_22_io_credit_available_19; // @[Router.scala:122:13]
wire _output_unit_0_to_22_io_credit_available_20; // @[Router.scala:122:13]
wire _output_unit_0_to_22_io_credit_available_21; // @[Router.scala:122:13]
wire _output_unit_0_to_22_io_channel_status_8_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_22_io_channel_status_9_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_22_io_channel_status_10_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_22_io_channel_status_11_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_22_io_channel_status_12_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_22_io_channel_status_13_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_22_io_channel_status_14_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_22_io_channel_status_15_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_22_io_channel_status_16_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_22_io_channel_status_17_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_22_io_channel_status_18_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_22_io_channel_status_19_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_22_io_channel_status_20_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_22_io_channel_status_21_occupied; // @[Router.scala:122:13]
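  // VC-allocation requests, switch-allocation requests, and output flit wires from the ingress unit instances. // @[Router.scala:116:13]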
wire _ingress_unit_8_from_50_io_vcalloc_req_valid; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_vcalloc_req_bits_vc_sel_7_0; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_vcalloc_req_bits_vc_sel_6_0; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_vcalloc_req_bits_vc_sel_5_0; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_vcalloc_req_bits_vc_sel_4_0; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_vcalloc_req_bits_vc_sel_3_0; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_vcalloc_req_bits_vc_sel_2_0; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_vcalloc_req_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_vcalloc_req_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_vcalloc_req_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_vcalloc_req_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_vcalloc_req_bits_vc_sel_0_3; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_vcalloc_req_bits_vc_sel_0_4; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_vcalloc_req_bits_vc_sel_0_5; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_vcalloc_req_bits_vc_sel_0_6; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_vcalloc_req_bits_vc_sel_0_7; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_vcalloc_req_bits_vc_sel_0_8; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_vcalloc_req_bits_vc_sel_0_9; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_vcalloc_req_bits_vc_sel_0_10; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_vcalloc_req_bits_vc_sel_0_11; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_vcalloc_req_bits_vc_sel_0_12; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_vcalloc_req_bits_vc_sel_0_13; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_vcalloc_req_bits_vc_sel_0_14; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_vcalloc_req_bits_vc_sel_0_15; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_vcalloc_req_bits_vc_sel_0_16; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_vcalloc_req_bits_vc_sel_0_17; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_vcalloc_req_bits_vc_sel_0_18; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_vcalloc_req_bits_vc_sel_0_19; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_vcalloc_req_bits_vc_sel_0_20; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_vcalloc_req_bits_vc_sel_0_21; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_salloc_req_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_salloc_req_0_bits_vc_sel_7_0; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_salloc_req_0_bits_vc_sel_6_0; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_salloc_req_0_bits_vc_sel_5_0; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_salloc_req_0_bits_vc_sel_4_0; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_salloc_req_0_bits_vc_sel_3_0; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_salloc_req_0_bits_vc_sel_2_0; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_salloc_req_0_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_salloc_req_0_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_salloc_req_0_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_salloc_req_0_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_salloc_req_0_bits_vc_sel_0_3; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_salloc_req_0_bits_vc_sel_0_4; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_salloc_req_0_bits_vc_sel_0_5; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_salloc_req_0_bits_vc_sel_0_6; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_salloc_req_0_bits_vc_sel_0_7; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_salloc_req_0_bits_vc_sel_0_8; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_salloc_req_0_bits_vc_sel_0_9; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_salloc_req_0_bits_vc_sel_0_10; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_salloc_req_0_bits_vc_sel_0_11; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_salloc_req_0_bits_vc_sel_0_12; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_salloc_req_0_bits_vc_sel_0_13; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_salloc_req_0_bits_vc_sel_0_14; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_salloc_req_0_bits_vc_sel_0_15; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_salloc_req_0_bits_vc_sel_0_16; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_salloc_req_0_bits_vc_sel_0_17; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_salloc_req_0_bits_vc_sel_0_18; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_salloc_req_0_bits_vc_sel_0_19; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_salloc_req_0_bits_vc_sel_0_20; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_salloc_req_0_bits_vc_sel_0_21; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_salloc_req_0_bits_tail; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_out_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_out_0_bits_flit_head; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_out_0_bits_flit_tail; // @[Router.scala:116:13]
wire [72:0] _ingress_unit_8_from_50_io_out_0_bits_flit_payload; // @[Router.scala:116:13]
wire [3:0] _ingress_unit_8_from_50_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:116:13]
wire [5:0] _ingress_unit_8_from_50_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:116:13]
wire [2:0] _ingress_unit_8_from_50_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:116:13]
wire [5:0] _ingress_unit_8_from_50_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:116:13]
wire [2:0] _ingress_unit_8_from_50_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:116:13]
wire [4:0] _ingress_unit_8_from_50_io_out_0_bits_out_virt_channel; // @[Router.scala:116:13]
wire _ingress_unit_8_from_50_io_in_ready; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_vcalloc_req_valid; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_vcalloc_req_bits_vc_sel_7_0; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_vcalloc_req_bits_vc_sel_6_0; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_vcalloc_req_bits_vc_sel_5_0; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_vcalloc_req_bits_vc_sel_4_0; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_vcalloc_req_bits_vc_sel_3_0; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_vcalloc_req_bits_vc_sel_2_0; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_vcalloc_req_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_vcalloc_req_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_vcalloc_req_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_vcalloc_req_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_vcalloc_req_bits_vc_sel_0_3; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_vcalloc_req_bits_vc_sel_0_4; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_vcalloc_req_bits_vc_sel_0_5; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_vcalloc_req_bits_vc_sel_0_6; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_vcalloc_req_bits_vc_sel_0_7; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_vcalloc_req_bits_vc_sel_0_8; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_vcalloc_req_bits_vc_sel_0_9; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_vcalloc_req_bits_vc_sel_0_10; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_vcalloc_req_bits_vc_sel_0_11; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_vcalloc_req_bits_vc_sel_0_12; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_vcalloc_req_bits_vc_sel_0_13; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_vcalloc_req_bits_vc_sel_0_14; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_vcalloc_req_bits_vc_sel_0_15; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_vcalloc_req_bits_vc_sel_0_16; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_vcalloc_req_bits_vc_sel_0_17; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_vcalloc_req_bits_vc_sel_0_18; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_vcalloc_req_bits_vc_sel_0_19; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_vcalloc_req_bits_vc_sel_0_20; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_vcalloc_req_bits_vc_sel_0_21; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_salloc_req_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_salloc_req_0_bits_vc_sel_7_0; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_salloc_req_0_bits_vc_sel_6_0; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_salloc_req_0_bits_vc_sel_5_0; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_salloc_req_0_bits_vc_sel_4_0; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_salloc_req_0_bits_vc_sel_3_0; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_salloc_req_0_bits_vc_sel_2_0; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_salloc_req_0_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_salloc_req_0_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_salloc_req_0_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_salloc_req_0_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_salloc_req_0_bits_vc_sel_0_3; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_salloc_req_0_bits_vc_sel_0_4; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_salloc_req_0_bits_vc_sel_0_5; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_salloc_req_0_bits_vc_sel_0_6; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_salloc_req_0_bits_vc_sel_0_7; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_salloc_req_0_bits_vc_sel_0_8; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_salloc_req_0_bits_vc_sel_0_9; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_salloc_req_0_bits_vc_sel_0_10; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_salloc_req_0_bits_vc_sel_0_11; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_salloc_req_0_bits_vc_sel_0_12; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_salloc_req_0_bits_vc_sel_0_13; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_salloc_req_0_bits_vc_sel_0_14; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_salloc_req_0_bits_vc_sel_0_15; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_salloc_req_0_bits_vc_sel_0_16; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_salloc_req_0_bits_vc_sel_0_17; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_salloc_req_0_bits_vc_sel_0_18; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_salloc_req_0_bits_vc_sel_0_19; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_salloc_req_0_bits_vc_sel_0_20; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_salloc_req_0_bits_vc_sel_0_21; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_salloc_req_0_bits_tail; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_out_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_out_0_bits_flit_head; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_out_0_bits_flit_tail; // @[Router.scala:116:13]
wire [72:0] _ingress_unit_7_from_49_io_out_0_bits_flit_payload; // @[Router.scala:116:13]
wire [3:0] _ingress_unit_7_from_49_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:116:13]
wire [5:0] _ingress_unit_7_from_49_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:116:13]
wire [2:0] _ingress_unit_7_from_49_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:116:13]
wire [5:0] _ingress_unit_7_from_49_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:116:13]
wire [2:0] _ingress_unit_7_from_49_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:116:13]
wire [4:0] _ingress_unit_7_from_49_io_out_0_bits_out_virt_channel; // @[Router.scala:116:13]
wire _ingress_unit_7_from_49_io_in_ready; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_vcalloc_req_valid; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_vcalloc_req_bits_vc_sel_7_0; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_vcalloc_req_bits_vc_sel_6_0; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_vcalloc_req_bits_vc_sel_5_0; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_vcalloc_req_bits_vc_sel_4_0; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_vcalloc_req_bits_vc_sel_3_0; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_vcalloc_req_bits_vc_sel_2_0; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_vcalloc_req_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_vcalloc_req_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_vcalloc_req_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_vcalloc_req_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_vcalloc_req_bits_vc_sel_0_3; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_vcalloc_req_bits_vc_sel_0_4; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_vcalloc_req_bits_vc_sel_0_5; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_vcalloc_req_bits_vc_sel_0_6; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_vcalloc_req_bits_vc_sel_0_7; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_vcalloc_req_bits_vc_sel_0_8; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_vcalloc_req_bits_vc_sel_0_9; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_vcalloc_req_bits_vc_sel_0_10; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_vcalloc_req_bits_vc_sel_0_11; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_vcalloc_req_bits_vc_sel_0_12; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_vcalloc_req_bits_vc_sel_0_13; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_vcalloc_req_bits_vc_sel_0_14; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_vcalloc_req_bits_vc_sel_0_15; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_vcalloc_req_bits_vc_sel_0_16; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_vcalloc_req_bits_vc_sel_0_17; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_vcalloc_req_bits_vc_sel_0_18; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_vcalloc_req_bits_vc_sel_0_19; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_vcalloc_req_bits_vc_sel_0_20; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_vcalloc_req_bits_vc_sel_0_21; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_salloc_req_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_salloc_req_0_bits_vc_sel_7_0; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_salloc_req_0_bits_vc_sel_6_0; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_salloc_req_0_bits_vc_sel_5_0; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_salloc_req_0_bits_vc_sel_4_0; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_salloc_req_0_bits_vc_sel_3_0; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_salloc_req_0_bits_vc_sel_2_0; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_salloc_req_0_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_salloc_req_0_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_salloc_req_0_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_salloc_req_0_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_salloc_req_0_bits_vc_sel_0_3; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_salloc_req_0_bits_vc_sel_0_4; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_salloc_req_0_bits_vc_sel_0_5; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_salloc_req_0_bits_vc_sel_0_6; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_salloc_req_0_bits_vc_sel_0_7; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_salloc_req_0_bits_vc_sel_0_8; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_salloc_req_0_bits_vc_sel_0_9; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_salloc_req_0_bits_vc_sel_0_10; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_salloc_req_0_bits_vc_sel_0_11; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_salloc_req_0_bits_vc_sel_0_12; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_salloc_req_0_bits_vc_sel_0_13; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_salloc_req_0_bits_vc_sel_0_14; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_salloc_req_0_bits_vc_sel_0_15; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_salloc_req_0_bits_vc_sel_0_16; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_salloc_req_0_bits_vc_sel_0_17; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_salloc_req_0_bits_vc_sel_0_18; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_salloc_req_0_bits_vc_sel_0_19; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_salloc_req_0_bits_vc_sel_0_20; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_salloc_req_0_bits_vc_sel_0_21; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_salloc_req_0_bits_tail; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_out_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_out_0_bits_flit_head; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_out_0_bits_flit_tail; // @[Router.scala:116:13]
wire [72:0] _ingress_unit_6_from_23_io_out_0_bits_flit_payload; // @[Router.scala:116:13]
wire [3:0] _ingress_unit_6_from_23_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:116:13]
wire [5:0] _ingress_unit_6_from_23_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:116:13]
wire [2:0] _ingress_unit_6_from_23_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:116:13]
wire [5:0] _ingress_unit_6_from_23_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:116:13]
wire [2:0] _ingress_unit_6_from_23_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:116:13]
wire [4:0] _ingress_unit_6_from_23_io_out_0_bits_out_virt_channel; // @[Router.scala:116:13]
wire _ingress_unit_6_from_23_io_in_ready; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_vcalloc_req_valid; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_vcalloc_req_bits_vc_sel_7_0; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_vcalloc_req_bits_vc_sel_6_0; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_vcalloc_req_bits_vc_sel_5_0; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_vcalloc_req_bits_vc_sel_4_0; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_vcalloc_req_bits_vc_sel_3_0; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_vcalloc_req_bits_vc_sel_2_0; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_vcalloc_req_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_vcalloc_req_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_vcalloc_req_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_vcalloc_req_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_vcalloc_req_bits_vc_sel_0_3; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_vcalloc_req_bits_vc_sel_0_4; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_vcalloc_req_bits_vc_sel_0_5; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_vcalloc_req_bits_vc_sel_0_6; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_vcalloc_req_bits_vc_sel_0_7; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_vcalloc_req_bits_vc_sel_0_8; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_vcalloc_req_bits_vc_sel_0_9; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_vcalloc_req_bits_vc_sel_0_10; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_vcalloc_req_bits_vc_sel_0_11; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_vcalloc_req_bits_vc_sel_0_12; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_vcalloc_req_bits_vc_sel_0_13; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_vcalloc_req_bits_vc_sel_0_14; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_vcalloc_req_bits_vc_sel_0_15; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_vcalloc_req_bits_vc_sel_0_16; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_vcalloc_req_bits_vc_sel_0_17; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_vcalloc_req_bits_vc_sel_0_18; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_vcalloc_req_bits_vc_sel_0_19; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_vcalloc_req_bits_vc_sel_0_20; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_vcalloc_req_bits_vc_sel_0_21; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_salloc_req_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_salloc_req_0_bits_vc_sel_7_0; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_salloc_req_0_bits_vc_sel_6_0; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_salloc_req_0_bits_vc_sel_5_0; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_salloc_req_0_bits_vc_sel_4_0; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_salloc_req_0_bits_vc_sel_3_0; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_salloc_req_0_bits_vc_sel_2_0; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_salloc_req_0_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_salloc_req_0_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_salloc_req_0_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_salloc_req_0_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_salloc_req_0_bits_vc_sel_0_3; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_salloc_req_0_bits_vc_sel_0_4; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_salloc_req_0_bits_vc_sel_0_5; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_salloc_req_0_bits_vc_sel_0_6; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_salloc_req_0_bits_vc_sel_0_7; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_salloc_req_0_bits_vc_sel_0_8; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_salloc_req_0_bits_vc_sel_0_9; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_salloc_req_0_bits_vc_sel_0_10; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_salloc_req_0_bits_vc_sel_0_11; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_salloc_req_0_bits_vc_sel_0_12; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_salloc_req_0_bits_vc_sel_0_13; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_salloc_req_0_bits_vc_sel_0_14; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_salloc_req_0_bits_vc_sel_0_15; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_salloc_req_0_bits_vc_sel_0_16; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_salloc_req_0_bits_vc_sel_0_17; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_salloc_req_0_bits_vc_sel_0_18; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_salloc_req_0_bits_vc_sel_0_19; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_salloc_req_0_bits_vc_sel_0_20; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_salloc_req_0_bits_vc_sel_0_21; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_salloc_req_0_bits_tail; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_out_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_out_0_bits_flit_head; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_out_0_bits_flit_tail; // @[Router.scala:116:13]
wire [72:0] _ingress_unit_5_from_22_io_out_0_bits_flit_payload; // @[Router.scala:116:13]
wire [3:0] _ingress_unit_5_from_22_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:116:13]
wire [5:0] _ingress_unit_5_from_22_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:116:13]
wire [2:0] _ingress_unit_5_from_22_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:116:13]
wire [5:0] _ingress_unit_5_from_22_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:116:13]
wire [2:0] _ingress_unit_5_from_22_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:116:13]
wire [4:0] _ingress_unit_5_from_22_io_out_0_bits_out_virt_channel; // @[Router.scala:116:13]
wire _ingress_unit_5_from_22_io_in_ready; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_vcalloc_req_valid; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_vcalloc_req_bits_vc_sel_7_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_vcalloc_req_bits_vc_sel_6_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_vcalloc_req_bits_vc_sel_5_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_vcalloc_req_bits_vc_sel_4_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_vcalloc_req_bits_vc_sel_3_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_vcalloc_req_bits_vc_sel_2_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_vcalloc_req_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_vcalloc_req_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_vcalloc_req_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_vcalloc_req_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_vcalloc_req_bits_vc_sel_0_3; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_vcalloc_req_bits_vc_sel_0_4; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_vcalloc_req_bits_vc_sel_0_5; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_vcalloc_req_bits_vc_sel_0_6; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_vcalloc_req_bits_vc_sel_0_7; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_vcalloc_req_bits_vc_sel_0_8; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_vcalloc_req_bits_vc_sel_0_9; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_vcalloc_req_bits_vc_sel_0_10; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_vcalloc_req_bits_vc_sel_0_11; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_vcalloc_req_bits_vc_sel_0_12; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_vcalloc_req_bits_vc_sel_0_13; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_vcalloc_req_bits_vc_sel_0_14; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_vcalloc_req_bits_vc_sel_0_15; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_vcalloc_req_bits_vc_sel_0_16; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_vcalloc_req_bits_vc_sel_0_17; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_vcalloc_req_bits_vc_sel_0_18; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_vcalloc_req_bits_vc_sel_0_19; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_vcalloc_req_bits_vc_sel_0_20; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_vcalloc_req_bits_vc_sel_0_21; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_salloc_req_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_salloc_req_0_bits_vc_sel_7_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_salloc_req_0_bits_vc_sel_6_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_salloc_req_0_bits_vc_sel_5_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_salloc_req_0_bits_vc_sel_4_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_salloc_req_0_bits_vc_sel_3_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_salloc_req_0_bits_vc_sel_2_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_salloc_req_0_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_salloc_req_0_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_salloc_req_0_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_salloc_req_0_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_salloc_req_0_bits_vc_sel_0_3; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_salloc_req_0_bits_vc_sel_0_4; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_salloc_req_0_bits_vc_sel_0_5; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_salloc_req_0_bits_vc_sel_0_6; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_salloc_req_0_bits_vc_sel_0_7; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_salloc_req_0_bits_vc_sel_0_8; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_salloc_req_0_bits_vc_sel_0_9; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_salloc_req_0_bits_vc_sel_0_10; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_salloc_req_0_bits_vc_sel_0_11; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_salloc_req_0_bits_vc_sel_0_12; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_salloc_req_0_bits_vc_sel_0_13; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_salloc_req_0_bits_vc_sel_0_14; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_salloc_req_0_bits_vc_sel_0_15; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_salloc_req_0_bits_vc_sel_0_16; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_salloc_req_0_bits_vc_sel_0_17; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_salloc_req_0_bits_vc_sel_0_18; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_salloc_req_0_bits_vc_sel_0_19; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_salloc_req_0_bits_vc_sel_0_20; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_salloc_req_0_bits_vc_sel_0_21; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_salloc_req_0_bits_tail; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_out_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_out_0_bits_flit_head; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_out_0_bits_flit_tail; // @[Router.scala:116:13]
wire [72:0] _ingress_unit_4_from_21_io_out_0_bits_flit_payload; // @[Router.scala:116:13]
wire [3:0] _ingress_unit_4_from_21_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:116:13]
wire [5:0] _ingress_unit_4_from_21_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:116:13]
wire [2:0] _ingress_unit_4_from_21_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:116:13]
wire [5:0] _ingress_unit_4_from_21_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:116:13]
wire [2:0] _ingress_unit_4_from_21_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:116:13]
wire [4:0] _ingress_unit_4_from_21_io_out_0_bits_out_virt_channel; // @[Router.scala:116:13]
wire _ingress_unit_4_from_21_io_in_ready; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_vcalloc_req_valid; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_vcalloc_req_bits_vc_sel_7_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_vcalloc_req_bits_vc_sel_6_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_vcalloc_req_bits_vc_sel_5_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_vcalloc_req_bits_vc_sel_4_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_vcalloc_req_bits_vc_sel_3_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_vcalloc_req_bits_vc_sel_2_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_vcalloc_req_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_vcalloc_req_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_vcalloc_req_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_vcalloc_req_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_vcalloc_req_bits_vc_sel_0_3; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_vcalloc_req_bits_vc_sel_0_4; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_vcalloc_req_bits_vc_sel_0_5; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_vcalloc_req_bits_vc_sel_0_6; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_vcalloc_req_bits_vc_sel_0_7; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_vcalloc_req_bits_vc_sel_0_8; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_vcalloc_req_bits_vc_sel_0_9; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_vcalloc_req_bits_vc_sel_0_10; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_vcalloc_req_bits_vc_sel_0_11; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_vcalloc_req_bits_vc_sel_0_12; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_vcalloc_req_bits_vc_sel_0_13; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_vcalloc_req_bits_vc_sel_0_14; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_vcalloc_req_bits_vc_sel_0_15; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_vcalloc_req_bits_vc_sel_0_16; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_vcalloc_req_bits_vc_sel_0_17; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_vcalloc_req_bits_vc_sel_0_18; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_vcalloc_req_bits_vc_sel_0_19; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_vcalloc_req_bits_vc_sel_0_20; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_vcalloc_req_bits_vc_sel_0_21; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_salloc_req_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_salloc_req_0_bits_vc_sel_7_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_salloc_req_0_bits_vc_sel_6_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_salloc_req_0_bits_vc_sel_5_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_salloc_req_0_bits_vc_sel_4_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_salloc_req_0_bits_vc_sel_3_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_salloc_req_0_bits_vc_sel_2_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_salloc_req_0_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_salloc_req_0_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_salloc_req_0_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_salloc_req_0_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_salloc_req_0_bits_vc_sel_0_3; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_salloc_req_0_bits_vc_sel_0_4; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_salloc_req_0_bits_vc_sel_0_5; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_salloc_req_0_bits_vc_sel_0_6; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_salloc_req_0_bits_vc_sel_0_7; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_salloc_req_0_bits_vc_sel_0_8; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_salloc_req_0_bits_vc_sel_0_9; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_salloc_req_0_bits_vc_sel_0_10; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_salloc_req_0_bits_vc_sel_0_11; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_salloc_req_0_bits_vc_sel_0_12; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_salloc_req_0_bits_vc_sel_0_13; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_salloc_req_0_bits_vc_sel_0_14; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_salloc_req_0_bits_vc_sel_0_15; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_salloc_req_0_bits_vc_sel_0_16; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_salloc_req_0_bits_vc_sel_0_17; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_salloc_req_0_bits_vc_sel_0_18; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_salloc_req_0_bits_vc_sel_0_19; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_salloc_req_0_bits_vc_sel_0_20; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_salloc_req_0_bits_vc_sel_0_21; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_salloc_req_0_bits_tail; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_out_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_out_0_bits_flit_head; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_out_0_bits_flit_tail; // @[Router.scala:116:13]
wire [72:0] _ingress_unit_1_from_3_io_out_0_bits_flit_payload; // @[Router.scala:116:13]
wire [3:0] _ingress_unit_1_from_3_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:116:13]
wire [5:0] _ingress_unit_1_from_3_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:116:13]
wire [2:0] _ingress_unit_1_from_3_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:116:13]
wire [5:0] _ingress_unit_1_from_3_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:116:13]
wire [2:0] _ingress_unit_1_from_3_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:116:13]
wire [4:0] _ingress_unit_1_from_3_io_out_0_bits_out_virt_channel; // @[Router.scala:116:13]
wire _ingress_unit_1_from_3_io_in_ready; // @[Router.scala:116:13]
wire _input_unit_0_from_22_io_vcalloc_req_valid; // @[Router.scala:112:13]
wire _input_unit_0_from_22_io_vcalloc_req_bits_vc_sel_7_0; // @[Router.scala:112:13]
wire _input_unit_0_from_22_io_vcalloc_req_bits_vc_sel_6_0; // @[Router.scala:112:13]
wire _input_unit_0_from_22_io_vcalloc_req_bits_vc_sel_5_0; // @[Router.scala:112:13]
wire _input_unit_0_from_22_io_vcalloc_req_bits_vc_sel_4_0; // @[Router.scala:112:13]
wire _input_unit_0_from_22_io_vcalloc_req_bits_vc_sel_3_0; // @[Router.scala:112:13]
wire _input_unit_0_from_22_io_vcalloc_req_bits_vc_sel_2_0; // @[Router.scala:112:13]
wire _input_unit_0_from_22_io_vcalloc_req_bits_vc_sel_1_0; // @[Router.scala:112:13]
wire _input_unit_0_from_22_io_salloc_req_0_valid; // @[Router.scala:112:13]
wire _input_unit_0_from_22_io_salloc_req_0_bits_vc_sel_7_0; // @[Router.scala:112:13]
wire _input_unit_0_from_22_io_salloc_req_0_bits_vc_sel_6_0; // @[Router.scala:112:13]
wire _input_unit_0_from_22_io_salloc_req_0_bits_vc_sel_5_0; // @[Router.scala:112:13]
wire _input_unit_0_from_22_io_salloc_req_0_bits_vc_sel_4_0; // @[Router.scala:112:13]
wire _input_unit_0_from_22_io_salloc_req_0_bits_vc_sel_3_0; // @[Router.scala:112:13]
wire _input_unit_0_from_22_io_salloc_req_0_bits_vc_sel_2_0; // @[Router.scala:112:13]
wire _input_unit_0_from_22_io_salloc_req_0_bits_vc_sel_1_0; // @[Router.scala:112:13]
wire _input_unit_0_from_22_io_salloc_req_0_bits_tail; // @[Router.scala:112:13]
wire _input_unit_0_from_22_io_out_0_valid; // @[Router.scala:112:13]
wire _input_unit_0_from_22_io_out_0_bits_flit_head; // @[Router.scala:112:13]
wire _input_unit_0_from_22_io_out_0_bits_flit_tail; // @[Router.scala:112:13]
wire [72:0] _input_unit_0_from_22_io_out_0_bits_flit_payload; // @[Router.scala:112:13]
wire [3:0] _input_unit_0_from_22_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:112:13]
wire [5:0] _input_unit_0_from_22_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:112:13]
wire [2:0] _input_unit_0_from_22_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:112:13]
wire [5:0] _input_unit_0_from_22_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:112:13]
wire [2:0] _input_unit_0_from_22_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:112:13]
wire [3:0] fires_count = {2'h0, {1'h0, _vc_allocator_io_req_0_ready & _input_unit_0_from_22_io_vcalloc_req_valid} + {1'h0, _vc_allocator_io_req_1_ready & _ingress_unit_1_from_3_io_vcalloc_req_valid}} + {1'h0, {1'h0, {1'h0, _vc_allocator_io_req_4_ready & _ingress_unit_4_from_21_io_vcalloc_req_valid} + {1'h0, _vc_allocator_io_req_5_ready & _ingress_unit_5_from_22_io_vcalloc_req_valid}} + {1'h0, {1'h0, _vc_allocator_io_req_6_ready & _ingress_unit_6_from_23_io_vcalloc_req_valid} + {1'h0, _vc_allocator_io_req_7_ready & _ingress_unit_7_from_49_io_vcalloc_req_valid} + {1'h0, _vc_allocator_io_req_8_ready & _ingress_unit_8_from_50_io_vcalloc_req_valid}}}; // @[Decoupled.scala:51:35]
reg REG_7_0_8_0; // @[Router.scala:178:14]
reg REG_7_0_7_0; // @[Router.scala:178:14]
reg REG_7_0_6_0; // @[Router.scala:178:14]
reg REG_7_0_5_0; // @[Router.scala:178:14]
reg REG_7_0_4_0; // @[Router.scala:178:14]
reg REG_7_0_1_0; // @[Router.scala:178:14]
reg REG_7_0_0_0; // @[Router.scala:178:14]
reg REG_6_0_8_0; // @[Router.scala:178:14]
reg REG_6_0_7_0; // @[Router.scala:178:14]
reg REG_6_0_6_0; // @[Router.scala:178:14]
reg REG_6_0_5_0; // @[Router.scala:178:14]
reg REG_6_0_4_0; // @[Router.scala:178:14]
reg REG_6_0_1_0; // @[Router.scala:178:14]
reg REG_6_0_0_0; // @[Router.scala:178:14]
reg REG_5_0_8_0; // @[Router.scala:178:14]
reg REG_5_0_7_0; // @[Router.scala:178:14]
reg REG_5_0_6_0; // @[Router.scala:178:14]
reg REG_5_0_5_0; // @[Router.scala:178:14]
reg REG_5_0_4_0; // @[Router.scala:178:14]
reg REG_5_0_1_0; // @[Router.scala:178:14]
reg REG_5_0_0_0; // @[Router.scala:178:14]
reg REG_4_0_8_0; // @[Router.scala:178:14]
reg REG_4_0_7_0; // @[Router.scala:178:14]
reg REG_4_0_6_0; // @[Router.scala:178:14]
reg REG_4_0_5_0; // @[Router.scala:178:14]
reg REG_4_0_4_0; // @[Router.scala:178:14]
reg REG_4_0_1_0; // @[Router.scala:178:14]
reg REG_4_0_0_0; // @[Router.scala:178:14]
reg REG_3_0_8_0; // @[Router.scala:178:14]
reg REG_3_0_7_0; // @[Router.scala:178:14]
reg REG_3_0_6_0; // @[Router.scala:178:14]
reg REG_3_0_5_0; // @[Router.scala:178:14]
reg REG_3_0_4_0; // @[Router.scala:178:14]
reg REG_3_0_1_0; // @[Router.scala:178:14]
reg REG_3_0_0_0; // @[Router.scala:178:14]
reg REG_2_0_8_0; // @[Router.scala:178:14]
reg REG_2_0_7_0; // @[Router.scala:178:14]
reg REG_2_0_6_0; // @[Router.scala:178:14]
reg REG_2_0_5_0; // @[Router.scala:178:14]
reg REG_2_0_4_0; // @[Router.scala:178:14]
reg REG_2_0_1_0; // @[Router.scala:178:14]
reg REG_2_0_0_0; // @[Router.scala:178:14]
reg REG_1_0_8_0; // @[Router.scala:178:14]
reg REG_1_0_7_0; // @[Router.scala:178:14]
reg REG_1_0_6_0; // @[Router.scala:178:14]
reg REG_1_0_5_0; // @[Router.scala:178:14]
reg REG_1_0_4_0; // @[Router.scala:178:14]
reg REG_1_0_1_0; // @[Router.scala:178:14]
reg REG_1_0_0_0; // @[Router.scala:178:14]
reg REG_0_0_8_0; // @[Router.scala:178:14]
reg REG_0_0_7_0; // @[Router.scala:178:14]
reg REG_0_0_6_0; // @[Router.scala:178:14]
reg REG_0_0_5_0; // @[Router.scala:178:14]
reg REG_0_0_4_0; // @[Router.scala:178:14]
reg REG_0_0_1_0; // @[Router.scala:178:14]
reg [63:0] debug_tsc; // @[Router.scala:195:28]
reg [63:0] debug_sample; // @[Router.scala:197:31]
wire _GEN = debug_sample == {44'h0, _plusarg_reader_out - 20'h1}; // @[PlusArg.scala:80:11]
reg [63:0] util_ctr; // @[Router.scala:203:29]
reg fired; // @[Router.scala:204:26]
wire _GEN_0 = (|_plusarg_reader_out) & _GEN; // @[PlusArg.scala:80:11]
wire _GEN_1 = _GEN_0 & fired; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_1; // @[Router.scala:203:29]
reg fired_1; // @[Router.scala:204:26]
wire _GEN_2 = _GEN_0 & fired_1; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_4; // @[Router.scala:203:29]
reg fired_4; // @[Router.scala:204:26]
wire _GEN_3 = _GEN_0 & fired_4; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_5; // @[Router.scala:203:29]
reg fired_5; // @[Router.scala:204:26]
wire _GEN_4 = _GEN_0 & fired_5; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_6; // @[Router.scala:203:29]
reg fired_6; // @[Router.scala:204:26]
wire _GEN_5 = _GEN_0 & fired_6; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_7; // @[Router.scala:203:29]
reg fired_7; // @[Router.scala:204:26]
wire _GEN_6 = _GEN_0 & fired_7; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_8; // @[Router.scala:203:29]
reg fired_8; // @[Router.scala:204:26]
wire _GEN_7 = _GEN_0 & fired_8; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_9; // @[Router.scala:203:29]
reg fired_9; // @[Router.scala:204:26]
wire _GEN_8 = _GEN_0 & fired_9; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_10; // @[Router.scala:203:29]
reg fired_10; // @[Router.scala:204:26]
wire _GEN_9 = _GEN_0 & fired_10; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_11; // @[Router.scala:203:29]
reg fired_11; // @[Router.scala:204:26]
wire _GEN_10 = _GEN_0 & fired_11; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_12; // @[Router.scala:203:29]
reg fired_12; // @[Router.scala:204:26]
wire _GEN_11 = _GEN_0 & fired_12; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_13; // @[Router.scala:203:29]
reg fired_13; // @[Router.scala:204:26]
wire _GEN_12 = _GEN_0 & fired_13; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_14; // @[Router.scala:203:29]
reg fired_14; // @[Router.scala:204:26]
wire _GEN_13 = _GEN_0 & fired_14; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_15; // @[Router.scala:203:29]
reg fired_15; // @[Router.scala:204:26]
wire _GEN_14 = _GEN_0 & fired_15; // @[Router.scala:204:26, :207:{33,71}] |
Generate the Verilog code corresponding to the following Chisel files.
File util.scala:
//******************************************************************************
// Copyright (c) 2015 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Utility Functions
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v3.util
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util.{Str}
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.tile.{TileKey}
import boom.v3.common.{MicroOp}
import boom.v3.exu.{BrUpdateInfo}
/**
* Object to XOR fold an input register of fullLength into a compressedLength.
*/
object Fold
{
def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = {
val clen = compressedLength
val hlen = fullLength
if (hlen <= clen) {
input
} else {
var res = 0.U(clen.W)
var remaining = input.asUInt
for (i <- 0 to hlen-1 by clen) {
val len = if (i + clen > hlen ) (hlen - i) else clen
require(len > 0)
res = res(clen-1,0) ^ remaining(len-1,0)
remaining = remaining >> len.U
}
res
}
}
}
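/**
 * Illustrative sketch (not part of the original BOOM source): using Fold to
 * hash a 16-bit global-history register down to an 8-bit predictor index.
 * The signal name `ghist` is an assumption for demonstration only.
 */
object FoldExample
{
  def apply(ghist: UInt): UInt = Fold(ghist, compressedLength = 8, fullLength = 16)
}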
/**
* Object to check if MicroOp was killed due to a branch mispredict.
* Uses "Fast" branch masks
*/
object IsKilledByBranch
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): Bool = {
return maskMatch(brupdate.b1.mispredict_mask, uop.br_mask)
}
def apply(brupdate: BrUpdateInfo, uop_mask: UInt): Bool = {
return maskMatch(brupdate.b1.mispredict_mask, uop_mask)
}
}
/**
* Object to return new MicroOp with a new BR mask given a MicroOp mask
* and old BR mask.
*/
object GetNewUopAndBrMask
{
def apply(uop: MicroOp, brupdate: BrUpdateInfo)
(implicit p: Parameters): MicroOp = {
val newuop = WireInit(uop)
newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask
newuop
}
}
/**
* Object to return a BR mask given a MicroOp mask and old BR mask.
*/
object GetNewBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = {
return uop.br_mask & ~brupdate.b1.resolve_mask
}
def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = {
return br_mask & ~brupdate.b1.resolve_mask
}
}
object UpdateBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = {
val out = WireInit(uop)
out.br_mask := GetNewBrMask(brupdate, uop)
out
}
def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = {
val out = WireInit(bundle)
out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask)
out
}
def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: Valid[T]): Valid[T] = {
val out = WireInit(bundle)
out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask)
out.valid := bundle.valid && !IsKilledByBranch(brupdate, bundle.bits.uop.br_mask)
out
}
}
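/**
 * Illustrative sketch (not part of the original BOOM source): the per-cycle
 * branch-mask bookkeeping typically done for an in-flight micro-op. An overlap
 * with the mispredict mask kills the op; otherwise the bits of branches that
 * resolved this cycle are cleared from its mask.
 */
object BrMaskBookkeepingExample
{
  def apply(brupdate: BrUpdateInfo, uop: MicroOp): (Bool, UInt) = {
    val killed    = IsKilledByBranch(brupdate, uop) // any of our branches mispredicted?
    val next_mask = GetNewBrMask(brupdate, uop)     // drop bits of resolved branches
    (killed, next_mask)
  }
}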
/**
* Object to check if at least 1 bit matches in two masks
*/
object maskMatch
{
def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U
}
/**
* Object to clear one bit in a mask given an index
*/
object clearMaskBit
{
def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0)
}
/**
* Object to shift a register over by one bit and concat a new one
*/
object PerformShiftRegister
{
def apply(reg_val: UInt, new_bit: Bool): UInt = {
reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt
reg_val
}
}
/**
* Object to shift a register over by one bit, wrapping the top bit around to the bottom
* (XOR'ed with a new-bit), and evicting a bit at index HLEN.
* This is used to simulate a longer HLEN-width shift register that is folded
* down to a compressed CLEN.
*/
object PerformCircularShiftRegister
{
def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = {
val carry = csr(clen-1)
val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U)
newval
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapAdd
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, amt: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + amt)(log2Ceil(n)-1,0)
} else {
val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt)
Mux(sum >= n.U,
sum - n.U,
sum)
}
}
}
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapSub
{
// "n" is the number of increments, so we wrap to n-1.
def apply(value: UInt, amt: Int, n: Int): UInt = {
if (isPow2(n)) {
(value - amt.U)(log2Ceil(n)-1,0)
} else {
val v = Cat(0.U(1.W), value)
val b = Cat(0.U(1.W), amt.U)
Mux(value >= amt.U,
value - amt.U,
n.U - amt.U + value)
}
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapInc
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === (n-1).U)
Mux(wrap, 0.U, value + 1.U)
}
}
}
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapDec
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value - 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === 0.U)
Mux(wrap, (n-1).U, value - 1.U)
}
}
}
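/**
 * Illustrative sketch (not part of the original BOOM source): maintaining
 * enqueue/dequeue pointers for a queue whose depth need not be a power of two;
 * WrapInc and WrapDec handle the wrap-around explicitly in that case.
 */
object WrapPointerExample
{
  def next(ptr: UInt, entries: Int): UInt = WrapInc(ptr, entries) // ptr + 1, wrapping to 0 after entries-1
  def prev(ptr: UInt, entries: Int): UInt = WrapDec(ptr, entries) // ptr - 1, wrapping to entries-1 below 0
}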
/**
* Object to mask off lower bits of a PC to align to a "b"
* Byte boundary.
*/
object AlignPCToBoundary
{
def apply(pc: UInt, b: Int): UInt = {
// Invert for the scenario where pc is longer than b
// (which would clear all bits above size(b)).
~(~pc | (b-1).U)
}
}
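/**
 * Illustrative sketch (not part of the original BOOM source): aligning a fetch
 * PC down to a 16-byte fetch-packet boundary (i.e. clearing pc(3,0)).
 */
object AlignPCExample
{
  def apply(pc: UInt): UInt = AlignPCToBoundary(pc, b = 16)
}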
/**
* Object to rotate a signal left by one
*/
object RotateL1
{
def apply(signal: UInt): UInt = {
val w = signal.getWidth
val out = Cat(signal(w-2,0), signal(w-1))
return out
}
}
/**
* Object to sext a value to a particular length.
*/
object Sext
{
def apply(x: UInt, length: Int): UInt = {
if (x.getWidth == length) return x
else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x)
}
}
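/**
 * Illustrative sketch (not part of the original BOOM source): sign-extending a
 * 12-bit I-type immediate to a 64-bit datapath width before use in the ALU.
 */
object SextExample
{
  def apply(imm12: UInt): UInt = Sext(imm12, 64)
}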
/**
* Object to translate from BOOM's special "packed immediate" to a 32b signed immediate
* Asking for U-type gives it shifted up 12 bits.
*/
object ImmGen
{
import boom.v3.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U}
def apply(ip: UInt, isel: UInt): SInt = {
val sign = ip(LONGEST_IMM_SZ-1).asSInt
val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign)
val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign)
val i11 = Mux(isel === IS_U, 0.S,
Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign))
val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt)
val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt)
val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S)
return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0).asSInt
}
}
/**
* Object to get the FP rounding mode out of a packed immediate.
*/
object ImmGenRm { def apply(ip: UInt): UInt = { return ip(2,0) } }
/**
* Object to get the FP function type from a packed immediate.
* Note: only works if !(IS_B or IS_S)
*/
object ImmGenTyp { def apply(ip: UInt): UInt = { return ip(9,8) } }
/**
* Object to see if an instruction is a JALR.
*/
object DebugIsJALR
{
def apply(inst: UInt): Bool = {
// TODO Chisel not sure why this won't compile
// val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)),
// Array(
// JALR -> Bool(true)))
inst(6,0) === "b1100111".U
}
}
/**
* Object to take an instruction and output its branch or jal target. Only used
* for a debug assert (no where else would we jump straight from instruction
* bits to a target).
*/
object DebugGetBJImm
{
def apply(inst: UInt): UInt = {
// TODO Chisel not sure why this won't compile
//val csignals =
//rocket.DecodeLogic(inst,
// List(Bool(false), Bool(false)),
// Array(
// BEQ -> List(Bool(true ), Bool(false)),
// BNE -> List(Bool(true ), Bool(false)),
// BGE -> List(Bool(true ), Bool(false)),
// BGEU -> List(Bool(true ), Bool(false)),
// BLT -> List(Bool(true ), Bool(false)),
// BLTU -> List(Bool(true ), Bool(false))
// ))
//val is_br :: nothing :: Nil = csignals
val is_br = (inst(6,0) === "b1100011".U)
val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W))
val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W))
Mux(is_br, br_targ, jal_targ)
}
}
/**
* Object to return the lowest bit position after the head.
*/
object AgePriorityEncoder
{
def apply(in: Seq[Bool], head: UInt): UInt = {
val n = in.size
val width = log2Ceil(in.size)
val n_padded = 1 << width
val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in
val idx = PriorityEncoder(temp_vec)
idx(width-1, 0) //discard msb
}
}
/**
* Object to determine whether queue
* index i0 is older than index i1.
*/
object IsOlder
{
def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head))
}
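/**
 * Illustrative sketch (not part of the original BOOM source): deciding which of
 * two ROB entries is older, treating the ROB as a circular buffer whose head
 * pointer defines age.
 */
object IsOlderExample
{
  def apply(rob_idx_a: UInt, rob_idx_b: UInt, rob_head: UInt): Bool =
    IsOlder(rob_idx_a, rob_idx_b, rob_head) // true if entry A was allocated first
}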
/**
* Set all bits at or below the highest order '1'.
*/
object MaskLower
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => in >> i.U).reduce(_|_)
}
}
/**
* Set all bits at or above the lowest order '1'.
*/
object MaskUpper
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_)
}
}
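/**
 * Illustrative sketch (not part of the original BOOM source): turning a one-hot
 * head pointer into thermometer masks, e.g. to split a queue's valid bits into
 * the entries at-or-below and at-or-above the head.
 */
object ThermometerMaskExample
{
  def apply(head_oh: UInt): (UInt, UInt) = (MaskLower(head_oh), MaskUpper(head_oh))
}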
/**
* Transpose a matrix of Chisel Vecs.
*/
object Transpose
{
def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = {
val n = in(0).size
VecInit((0 until n).map(i => VecInit(in.map(row => row(i)))))
}
}
/**
* N-wide one-hot priority encoder.
*/
object SelectFirstN
{
def apply(in: UInt, n: Int) = {
val sels = Wire(Vec(n, UInt(in.getWidth.W)))
var mask = in
for (i <- 0 until n) {
sels(i) := PriorityEncoderOH(mask)
mask = mask & ~sels(i)
}
sels
}
}
/**
* Connect the first k of n valid input interfaces to k output interfaces.
*/
class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module
{
require(n >= k)
val io = IO(new Bundle {
val in = Vec(n, Flipped(DecoupledIO(gen)))
val out = Vec(k, DecoupledIO(gen))
})
if (n == k) {
io.out <> io.in
} else {
val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c))
val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col =>
(col zip io.in.map(_.valid)) map {case (c,v) => c && v})
val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_))
val out_valids = sels map (col => col.reduce(_||_))
val out_data = sels map (s => Mux1H(s, io.in.map(_.bits)))
in_readys zip io.in foreach {case (r,i) => i.ready := r}
out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d}
}
}
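/**
 * Illustrative sketch (not part of the original BOOM source): compacting up to
 * four sparsely-valid 8-bit inputs down to two dense outputs, e.g. when only
 * two downstream ports are available. The payload width is an assumption.
 */
class CompactorExample extends Module
{
  val io = IO(new Bundle {
    val in  = Vec(4, Flipped(DecoupledIO(UInt(8.W))))
    val out = Vec(2, DecoupledIO(UInt(8.W)))
  })
  val compactor = Module(new Compactor(4, 2, UInt(8.W)))
  compactor.io.in <> io.in
  io.out <> compactor.io.out
}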
/**
* Create a queue that can be killed with a branch kill signal.
* Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq).
*/
class BranchKillableQueue[T <: boom.v3.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v3.common.MicroOp => Bool = u => true.B, flow: Boolean = true)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v3.common.BoomModule()(p)
with boom.v3.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val enq = Flipped(Decoupled(gen))
val deq = Decoupled(gen)
val brupdate = Input(new BrUpdateInfo())
val flush = Input(Bool())
val empty = Output(Bool())
val count = Output(UInt(log2Ceil(entries).W))
})
val ram = Mem(entries, gen)
val valids = RegInit(VecInit(Seq.fill(entries) {false.B}))
val uops = Reg(Vec(entries, new MicroOp))
val enq_ptr = Counter(entries)
val deq_ptr = Counter(entries)
val maybe_full = RegInit(false.B)
val ptr_match = enq_ptr.value === deq_ptr.value
io.empty := ptr_match && !maybe_full
val full = ptr_match && maybe_full
val do_enq = WireInit(io.enq.fire)
val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty)
for (i <- 0 until entries) {
val mask = uops(i).br_mask
val uop = uops(i)
valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, mask) && !(io.flush && flush_fn(uop))
when (valids(i)) {
uops(i).br_mask := GetNewBrMask(io.brupdate, mask)
}
}
when (do_enq) {
ram(enq_ptr.value) := io.enq.bits
valids(enq_ptr.value) := true.B //!IsKilledByBranch(io.brupdate, io.enq.bits.uop)
uops(enq_ptr.value) := io.enq.bits.uop
uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
enq_ptr.inc()
}
when (do_deq) {
valids(deq_ptr.value) := false.B
deq_ptr.inc()
}
when (do_enq =/= do_deq) {
maybe_full := do_enq
}
io.enq.ready := !full
val out = Wire(gen)
out := ram(deq_ptr.value)
out.uop := uops(deq_ptr.value)
io.deq.valid := !io.empty && valids(deq_ptr.value) && !IsKilledByBranch(io.brupdate, out.uop) && !(io.flush && flush_fn(out.uop))
io.deq.bits := out
io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, out.uop)
// For flow queue behavior.
if (flow) {
when (io.empty) {
io.deq.valid := io.enq.valid //&& !IsKilledByBranch(io.brupdate, io.enq.bits.uop)
io.deq.bits := io.enq.bits
io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
do_deq := false.B
when (io.deq.ready) { do_enq := false.B }
}
}
private val ptr_diff = enq_ptr.value - deq_ptr.value
if (isPow2(entries)) {
io.count := Cat(maybe_full && ptr_match, ptr_diff)
}
else {
io.count := Mux(ptr_match,
Mux(maybe_full,
entries.asUInt, 0.U),
Mux(deq_ptr.value > enq_ptr.value,
entries.asUInt + ptr_diff, ptr_diff))
}
}
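/**
 * Illustrative sketch (not part of the original BOOM source): a 4-entry
 * branch-killable buffer carrying a hypothetical payload bundle. Entries whose
 * branch mask overlaps a mispredict are squashed in place; the default
 * flush_fn (flush everything) and flow behavior are kept.
 */
class ExamplePayload(implicit p: Parameters) extends boom.v3.common.BoomBundle
  with boom.v3.common.HasBoomUOP
{
  val data = UInt(64.W)
}

class BranchKillableQueueExample(implicit p: Parameters) extends boom.v3.common.BoomModule
{
  val io = IO(new Bundle {
    val enq      = Flipped(Decoupled(new ExamplePayload))
    val deq      = Decoupled(new ExamplePayload)
    val brupdate = Input(new BrUpdateInfo())
    val flush    = Input(Bool())
  })
  val q = Module(new BranchKillableQueue(new ExamplePayload, entries = 4))
  q.io.enq <> io.enq
  io.deq <> q.io.deq
  q.io.brupdate := io.brupdate
  q.io.flush    := io.flush
}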
// ------------------------------------------
// Printf helper functions
// ------------------------------------------
object BoolToChar
{
/**
* Take in a Chisel Bool and convert it into a Str
* based on the Chars given
*
* @param c_bool Chisel Bool
* @param trueChar Scala Char if bool is true
* @param falseChar Scala Char if bool is false
* @return UInt ASCII Char for "trueChar" or "falseChar"
*/
def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = {
Mux(c_bool, Str(trueChar), Str(falseChar))
}
}
object CfiTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param cfi_type specific cfi type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(cfi_type: UInt) = {
val strings = Seq("----", "BR ", "JAL ", "JALR")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(cfi_type)
}
}
object BpdTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param bpd_type specific bpd type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(bpd_type: UInt) = {
val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(bpd_type)
}
}
object RobTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param rob_type specific rob type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(rob_type: UInt) = {
val strings = Seq("RST", "NML", "RBK", " WT")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(rob_type)
}
}
object XRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param xreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(xreg: UInt) = {
val strings = Seq(" x0", " ra", " sp", " gp",
" tp", " t0", " t1", " t2",
" s0", " s1", " a0", " a1",
" a2", " a3", " a4", " a5",
" a6", " a7", " s2", " s3",
" s4", " s5", " s6", " s7",
" s8", " s9", "s10", "s11",
" t3", " t4", " t5", " t6")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(xreg)
}
}
object FPRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param fpreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(fpreg: UInt) = {
val strings = Seq(" ft0", " ft1", " ft2", " ft3",
" ft4", " ft5", " ft6", " ft7",
" fs0", " fs1", " fa0", " fa1",
" fa2", " fa3", " fa4", " fa5",
" fa6", " fa7", " fs2", " fs3",
" fs4", " fs5", " fs6", " fs7",
" fs8", " fs9", "fs10", "fs11",
" ft8", " ft9", "ft10", "ft11")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(fpreg)
}
}
object BoomCoreStringPrefix
{
/**
* Add prefix to BOOM strings (currently only adds the hartId)
*
* @param strs list of strings
* @return String combining the list with the prefix per line
*/
def apply(strs: String*)(implicit p: Parameters) = {
val prefix = "[C" + s"${p(TileKey).tileId}" + "] "
strs.map(str => prefix + str + "\n").mkString("")
}
}
File consts.scala:
//******************************************************************************
// Copyright (c) 2011 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// RISCV Processor Constants
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v3.common.constants
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util.Str
import freechips.rocketchip.rocket.RVCExpander
/**
* Mixin for issue queue types
*/
trait IQType
{
val IQT_SZ = 3
val IQT_INT = 1.U(IQT_SZ.W)
val IQT_MEM = 2.U(IQT_SZ.W)
val IQT_FP = 4.U(IQT_SZ.W)
val IQT_MFP = 6.U(IQT_SZ.W)
}
/**
* Mixin for scalar operation constants
*/
trait ScalarOpConstants
{
val X = BitPat("b?")
val Y = BitPat("b1")
val N = BitPat("b0")
//************************************
// Extra Constants
// Which branch predictor predicted us
val BSRC_SZ = 2
val BSRC_1 = 0.U(BSRC_SZ.W) // 1-cycle branch pred
val BSRC_2 = 1.U(BSRC_SZ.W) // 2-cycle branch pred
val BSRC_3 = 2.U(BSRC_SZ.W) // 3-cycle branch pred
val BSRC_C = 3.U(BSRC_SZ.W) // core branch resolution
//************************************
// Control Signals
// CFI types
val CFI_SZ = 3
val CFI_X = 0.U(CFI_SZ.W) // Not a CFI instruction
val CFI_BR = 1.U(CFI_SZ.W) // Branch
val CFI_JAL = 2.U(CFI_SZ.W) // JAL
val CFI_JALR = 3.U(CFI_SZ.W) // JALR
// PC Select Signal
val PC_PLUS4 = 0.U(2.W) // PC + 4
val PC_BRJMP = 1.U(2.W) // brjmp_target
val PC_JALR = 2.U(2.W) // jump_reg_target
// Branch Type
val BR_N = 0.U(4.W) // Next
val BR_NE = 1.U(4.W) // Branch on NotEqual
val BR_EQ = 2.U(4.W) // Branch on Equal
val BR_GE = 3.U(4.W) // Branch on Greater/Equal
val BR_GEU = 4.U(4.W) // Branch on Greater/Equal Unsigned
val BR_LT = 5.U(4.W) // Branch on Less Than
val BR_LTU = 6.U(4.W) // Branch on Less Than Unsigned
val BR_J = 7.U(4.W) // Jump
val BR_JR = 8.U(4.W) // Jump Register
// RS1 Operand Select Signal
val OP1_RS1 = 0.U(2.W) // Register Source #1
val OP1_ZERO= 1.U(2.W)
val OP1_PC = 2.U(2.W)
val OP1_X = BitPat("b??")
// RS2 Operand Select Signal
val OP2_RS2 = 0.U(3.W) // Register Source #2
val OP2_IMM = 1.U(3.W) // immediate
val OP2_ZERO= 2.U(3.W) // constant 0
val OP2_NEXT= 3.U(3.W) // constant 2/4 (for PC+2/4)
val OP2_IMMC= 4.U(3.W) // for CSR imm found in RS1
val OP2_X = BitPat("b???")
// Register File Write Enable Signal
val REN_0 = false.B
val REN_1 = true.B
// Is 32b Word or 64b Doubleword?
val SZ_DW = 1
val DW_X = true.B // Bool(xLen==64)
val DW_32 = false.B
val DW_64 = true.B
val DW_XPR = true.B // Bool(xLen==64)
// Memory Enable Signal
val MEN_0 = false.B
val MEN_1 = true.B
val MEN_X = false.B
// Immediate Extend Select
val IS_I = 0.U(3.W) // I-Type (LD,ALU)
val IS_S = 1.U(3.W) // S-Type (ST)
val IS_B = 2.U(3.W) // SB-Type (BR)
val IS_U = 3.U(3.W) // U-Type (LUI/AUIPC)
val IS_J = 4.U(3.W) // UJ-Type (J/JAL)
val IS_X = BitPat("b???")
// Decode Stage Control Signals
val RT_FIX = 0.U(2.W)
val RT_FLT = 1.U(2.W)
val RT_PAS = 3.U(2.W) // pass-through (prs1 := lrs1, etc)
val RT_X = 2.U(2.W) // not-a-register (but shouldn't get a busy-bit, etc.)
// TODO rename RT_NAR
// Micro-op opcodes
// TODO change micro-op opcodes into using enum
val UOPC_SZ = 7
val uopX = BitPat.dontCare(UOPC_SZ)
val uopNOP = 0.U(UOPC_SZ.W)
val uopLD = 1.U(UOPC_SZ.W)
val uopSTA = 2.U(UOPC_SZ.W) // store address generation
val uopSTD = 3.U(UOPC_SZ.W) // store data generation
val uopLUI = 4.U(UOPC_SZ.W)
val uopADDI = 5.U(UOPC_SZ.W)
val uopANDI = 6.U(UOPC_SZ.W)
val uopORI = 7.U(UOPC_SZ.W)
val uopXORI = 8.U(UOPC_SZ.W)
val uopSLTI = 9.U(UOPC_SZ.W)
val uopSLTIU= 10.U(UOPC_SZ.W)
val uopSLLI = 11.U(UOPC_SZ.W)
val uopSRAI = 12.U(UOPC_SZ.W)
val uopSRLI = 13.U(UOPC_SZ.W)
val uopSLL = 14.U(UOPC_SZ.W)
val uopADD = 15.U(UOPC_SZ.W)
val uopSUB = 16.U(UOPC_SZ.W)
val uopSLT = 17.U(UOPC_SZ.W)
val uopSLTU = 18.U(UOPC_SZ.W)
val uopAND = 19.U(UOPC_SZ.W)
val uopOR = 20.U(UOPC_SZ.W)
val uopXOR = 21.U(UOPC_SZ.W)
val uopSRA = 22.U(UOPC_SZ.W)
val uopSRL = 23.U(UOPC_SZ.W)
val uopBEQ = 24.U(UOPC_SZ.W)
val uopBNE = 25.U(UOPC_SZ.W)
val uopBGE = 26.U(UOPC_SZ.W)
val uopBGEU = 27.U(UOPC_SZ.W)
val uopBLT = 28.U(UOPC_SZ.W)
val uopBLTU = 29.U(UOPC_SZ.W)
val uopCSRRW= 30.U(UOPC_SZ.W)
val uopCSRRS= 31.U(UOPC_SZ.W)
val uopCSRRC= 32.U(UOPC_SZ.W)
val uopCSRRWI=33.U(UOPC_SZ.W)
val uopCSRRSI=34.U(UOPC_SZ.W)
val uopCSRRCI=35.U(UOPC_SZ.W)
val uopJ = 36.U(UOPC_SZ.W)
val uopJAL = 37.U(UOPC_SZ.W)
val uopJALR = 38.U(UOPC_SZ.W)
val uopAUIPC= 39.U(UOPC_SZ.W)
//val uopSRET = 40.U(UOPC_SZ.W)
val uopCFLSH= 41.U(UOPC_SZ.W)
val uopFENCE= 42.U(UOPC_SZ.W)
val uopADDIW= 43.U(UOPC_SZ.W)
val uopADDW = 44.U(UOPC_SZ.W)
val uopSUBW = 45.U(UOPC_SZ.W)
val uopSLLIW= 46.U(UOPC_SZ.W)
val uopSLLW = 47.U(UOPC_SZ.W)
val uopSRAIW= 48.U(UOPC_SZ.W)
val uopSRAW = 49.U(UOPC_SZ.W)
val uopSRLIW= 50.U(UOPC_SZ.W)
val uopSRLW = 51.U(UOPC_SZ.W)
val uopMUL = 52.U(UOPC_SZ.W)
val uopMULH = 53.U(UOPC_SZ.W)
val uopMULHU= 54.U(UOPC_SZ.W)
val uopMULHSU=55.U(UOPC_SZ.W)
val uopMULW = 56.U(UOPC_SZ.W)
val uopDIV = 57.U(UOPC_SZ.W)
val uopDIVU = 58.U(UOPC_SZ.W)
val uopREM = 59.U(UOPC_SZ.W)
val uopREMU = 60.U(UOPC_SZ.W)
val uopDIVW = 61.U(UOPC_SZ.W)
val uopDIVUW= 62.U(UOPC_SZ.W)
val uopREMW = 63.U(UOPC_SZ.W)
val uopREMUW= 64.U(UOPC_SZ.W)
val uopFENCEI = 65.U(UOPC_SZ.W)
// = 66.U(UOPC_SZ.W)
val uopAMO_AG = 67.U(UOPC_SZ.W) // AMO-address gen (use normal STD for datagen)
val uopFMV_W_X = 68.U(UOPC_SZ.W)
val uopFMV_D_X = 69.U(UOPC_SZ.W)
val uopFMV_X_W = 70.U(UOPC_SZ.W)
val uopFMV_X_D = 71.U(UOPC_SZ.W)
val uopFSGNJ_S = 72.U(UOPC_SZ.W)
val uopFSGNJ_D = 73.U(UOPC_SZ.W)
val uopFCVT_S_D = 74.U(UOPC_SZ.W)
val uopFCVT_D_S = 75.U(UOPC_SZ.W)
val uopFCVT_S_X = 76.U(UOPC_SZ.W)
val uopFCVT_D_X = 77.U(UOPC_SZ.W)
val uopFCVT_X_S = 78.U(UOPC_SZ.W)
val uopFCVT_X_D = 79.U(UOPC_SZ.W)
val uopCMPR_S = 80.U(UOPC_SZ.W)
val uopCMPR_D = 81.U(UOPC_SZ.W)
val uopFCLASS_S = 82.U(UOPC_SZ.W)
val uopFCLASS_D = 83.U(UOPC_SZ.W)
val uopFMINMAX_S = 84.U(UOPC_SZ.W)
val uopFMINMAX_D = 85.U(UOPC_SZ.W)
// = 86.U(UOPC_SZ.W)
val uopFADD_S = 87.U(UOPC_SZ.W)
val uopFSUB_S = 88.U(UOPC_SZ.W)
val uopFMUL_S = 89.U(UOPC_SZ.W)
val uopFADD_D = 90.U(UOPC_SZ.W)
val uopFSUB_D = 91.U(UOPC_SZ.W)
val uopFMUL_D = 92.U(UOPC_SZ.W)
val uopFMADD_S = 93.U(UOPC_SZ.W)
val uopFMSUB_S = 94.U(UOPC_SZ.W)
val uopFNMADD_S = 95.U(UOPC_SZ.W)
val uopFNMSUB_S = 96.U(UOPC_SZ.W)
val uopFMADD_D = 97.U(UOPC_SZ.W)
val uopFMSUB_D = 98.U(UOPC_SZ.W)
val uopFNMADD_D = 99.U(UOPC_SZ.W)
val uopFNMSUB_D = 100.U(UOPC_SZ.W)
val uopFDIV_S = 101.U(UOPC_SZ.W)
val uopFDIV_D = 102.U(UOPC_SZ.W)
val uopFSQRT_S = 103.U(UOPC_SZ.W)
val uopFSQRT_D = 104.U(UOPC_SZ.W)
val uopWFI = 105.U(UOPC_SZ.W) // pass uop down the CSR pipeline
val uopERET = 106.U(UOPC_SZ.W) // pass uop down the CSR pipeline, also is ERET
val uopSFENCE = 107.U(UOPC_SZ.W)
val uopROCC = 108.U(UOPC_SZ.W)
val uopMOV = 109.U(UOPC_SZ.W) // conditional mov decoded from "add rd, x0, rs2"
// The Bubble Instruction (Machine generated NOP)
// Insert (XOR x0,x0,x0) which is different from software compiler
// generated NOPs which are (ADDI x0, x0, 0).
// Reasoning for this is to let visualizers and stat-trackers differentiate
// between software NOPs and machine-generated Bubbles in the pipeline.
val BUBBLE = (0x4033).U(32.W)
def NullMicroOp()(implicit p: Parameters): boom.v3.common.MicroOp = {
val uop = Wire(new boom.v3.common.MicroOp)
uop := DontCare // Overridden in the following lines
uop.uopc := uopNOP // maybe not required, but helps on asserts that try to catch spurious behavior
uop.bypassable := false.B
uop.fp_val := false.B
uop.uses_stq := false.B
uop.uses_ldq := false.B
uop.pdst := 0.U
uop.dst_rtype := RT_X
val cs = Wire(new boom.v3.common.CtrlSignals())
cs := DontCare // Overridden in the following lines
cs.br_type := BR_N
cs.csr_cmd := freechips.rocketchip.rocket.CSR.N
cs.is_load := false.B
cs.is_sta := false.B
cs.is_std := false.B
uop.ctrl := cs
uop
}
}
/**
* Mixin for RISCV constants
*/
trait RISCVConstants
{
// abstract out instruction decode magic numbers
val RD_MSB = 11
val RD_LSB = 7
val RS1_MSB = 19
val RS1_LSB = 15
val RS2_MSB = 24
val RS2_LSB = 20
val RS3_MSB = 31
val RS3_LSB = 27
val CSR_ADDR_MSB = 31
val CSR_ADDR_LSB = 20
val CSR_ADDR_SZ = 12
// location of the fifth bit in the shamt (for checking for illegal ops for SRAIW,etc.)
val SHAMT_5_BIT = 25
val LONGEST_IMM_SZ = 20
val X0 = 0.U
val RA = 1.U // return address register
// memory consistency model
// The C/C++ atomics MCM requires that two loads to the same address maintain program order.
// The Cortex A9 does NOT enforce load/load ordering (which leads to buggy behavior).
val MCM_ORDER_DEPENDENT_LOADS = true
val jal_opc = (0x6f).U
val jalr_opc = (0x67).U
def GetUop(inst: UInt): UInt = inst(6,0)
def GetRd (inst: UInt): UInt = inst(RD_MSB,RD_LSB)
def GetRs1(inst: UInt): UInt = inst(RS1_MSB,RS1_LSB)
def ExpandRVC(inst: UInt)(implicit p: Parameters): UInt = {
val rvc_exp = Module(new RVCExpander)
rvc_exp.io.in := inst
Mux(rvc_exp.io.rvc, rvc_exp.io.out.bits, inst)
}
// Note: Accepts only EXPANDED rvc instructions
def ComputeBranchTarget(pc: UInt, inst: UInt, xlen: Int)(implicit p: Parameters): UInt = {
val b_imm32 = Cat(Fill(20,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W))
((pc.asSInt + b_imm32.asSInt).asSInt & (-2).S).asUInt
}
// Note: Accepts only EXPANDED rvc instructions
def ComputeJALTarget(pc: UInt, inst: UInt, xlen: Int)(implicit p: Parameters): UInt = {
val j_imm32 = Cat(Fill(12,inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W))
((pc.asSInt + j_imm32.asSInt).asSInt & (-2).S).asUInt
}
// Note: Accepts only EXPANDED rvc instructions
def GetCfiType(inst: UInt)(implicit p: Parameters): UInt = {
val bdecode = Module(new boom.v3.exu.BranchDecode)
bdecode.io.inst := inst
bdecode.io.pc := 0.U
bdecode.io.out.cfi_type
}
}
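/**
 * Illustrative sketch (not part of the original BOOM source): mixing
 * RISCVConstants into a small helper that expands a possibly-compressed
 * instruction and then computes its JAL target for a 64-bit machine.
 */
object JalTargetExample extends RISCVConstants
{
  def apply(pc: UInt, raw_inst: UInt)(implicit p: Parameters): UInt =
    ComputeJALTarget(pc, ExpandRVC(raw_inst), 64)
}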
/**
* Mixin for exception cause constants
*/
trait ExcCauseConstants
{
// a memory disambiguation misspeculation occurred
val MINI_EXCEPTION_MEM_ORDERING = 16.U
val MINI_EXCEPTION_CSR_REPLAY = 17.U
require (!freechips.rocketchip.rocket.Causes.all.contains(16))
require (!freechips.rocketchip.rocket.Causes.all.contains(17))
}
File issue-slot.scala:
//******************************************************************************
// Copyright (c) 2015 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// RISCV Processor Issue Slot Logic
//--------------------------------------------------------------------------
//------------------------------------------------------------------------------
//
// Note: stores (and AMOs) are "broken down" into 2 uops, but stored within a single issue-slot.
// TODO XXX make a separate issueSlot for MemoryIssueSlots, and only they break apart stores.
// TODO Disable ldspec for FP queue.
package boom.v3.exu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import boom.v3.common._
import boom.v3.util._
import FUConstants._
/**
* IO bundle to interact with Issue slot
*
* @param numWakeupPorts number of wakeup ports for the slot
*/
class IssueSlotIO(val numWakeupPorts: Int)(implicit p: Parameters) extends BoomBundle
{
val valid = Output(Bool())
val will_be_valid = Output(Bool()) // TODO code review, do we need this signal so explicitly?
val request = Output(Bool())
val request_hp = Output(Bool())
val grant = Input(Bool())
val brupdate = Input(new BrUpdateInfo())
val kill = Input(Bool()) // pipeline flush
val clear = Input(Bool()) // entry being moved elsewhere (not mutually exclusive with grant)
val ldspec_miss = Input(Bool()) // Previous cycle's speculative load wakeup was mispredicted.
val wakeup_ports = Flipped(Vec(numWakeupPorts, Valid(new IqWakeup(maxPregSz))))
val pred_wakeup_port = Flipped(Valid(UInt(log2Ceil(ftqSz).W)))
val spec_ld_wakeup = Flipped(Vec(memWidth, Valid(UInt(width=maxPregSz.W))))
val in_uop = Flipped(Valid(new MicroOp())) // if valid, this WILL overwrite an entry!
val out_uop = Output(new MicroOp()) // the updated slot uop; will be shifted upwards in a collapsing queue.
val uop = Output(new MicroOp()) // the current Slot's uop. Sent down the pipeline when issued.
val debug = {
val result = new Bundle {
val p1 = Bool()
val p2 = Bool()
val p3 = Bool()
val ppred = Bool()
val state = UInt(width=2.W)
}
Output(result)
}
}
/**
* Single issue slot. Holds a uop within the issue queue
*
* @param numWakeupPorts number of wakeup ports
*/
class IssueSlot(val numWakeupPorts: Int)(implicit p: Parameters)
extends BoomModule
with IssueUnitConstants
{
val io = IO(new IssueSlotIO(numWakeupPorts))
// slot invalid?
// slot is valid, holding 1 uop
// slot is valid, holds 2 uops (like a store)
def is_invalid = state === s_invalid
def is_valid = state =/= s_invalid
val next_state = Wire(UInt()) // the next state of this slot (which might then get moved to a new slot)
val next_uopc = Wire(UInt()) // the next uopc of this slot (which might then get moved to a new slot)
val next_lrs1_rtype = Wire(UInt()) // the next reg type of this slot (which might then get moved to a new slot)
val next_lrs2_rtype = Wire(UInt()) // the next reg type of this slot (which might then get moved to a new slot)
val state = RegInit(s_invalid)
val p1 = RegInit(false.B)
val p2 = RegInit(false.B)
val p3 = RegInit(false.B)
val ppred = RegInit(false.B)
// Poison if woken up by speculative load.
// Poison lasts 1 cycle (as ldMiss will come on the next cycle).
// SO if poisoned is true, set it to false!
val p1_poisoned = RegInit(false.B)
val p2_poisoned = RegInit(false.B)
p1_poisoned := false.B
p2_poisoned := false.B
val next_p1_poisoned = Mux(io.in_uop.valid, io.in_uop.bits.iw_p1_poisoned, p1_poisoned)
val next_p2_poisoned = Mux(io.in_uop.valid, io.in_uop.bits.iw_p2_poisoned, p2_poisoned)
val slot_uop = RegInit(NullMicroOp)
val next_uop = Mux(io.in_uop.valid, io.in_uop.bits, slot_uop)
//-----------------------------------------------------------------------------
// next slot state computation
// compute the next state for THIS entry slot (in a collapsing queue, the
// current uop may get moved elsewhere, and a new uop can enter)
when (io.kill) {
state := s_invalid
} .elsewhen (io.in_uop.valid) {
state := io.in_uop.bits.iw_state
} .elsewhen (io.clear) {
state := s_invalid
} .otherwise {
state := next_state
}
//-----------------------------------------------------------------------------
// "update" state
// compute the next state for the micro-op in this slot. This micro-op may
// be moved elsewhere, so the "next_state" travels with it.
// defaults
next_state := state
next_uopc := slot_uop.uopc
next_lrs1_rtype := slot_uop.lrs1_rtype
next_lrs2_rtype := slot_uop.lrs2_rtype
when (io.kill) {
next_state := s_invalid
} .elsewhen ((io.grant && (state === s_valid_1)) ||
(io.grant && (state === s_valid_2) && p1 && p2 && ppred)) {
// try to issue this uop.
when (!(io.ldspec_miss && (p1_poisoned || p2_poisoned))) {
next_state := s_invalid
}
} .elsewhen (io.grant && (state === s_valid_2)) {
when (!(io.ldspec_miss && (p1_poisoned || p2_poisoned))) {
next_state := s_valid_1
when (p1) {
slot_uop.uopc := uopSTD
next_uopc := uopSTD
slot_uop.lrs1_rtype := RT_X
next_lrs1_rtype := RT_X
} .otherwise {
slot_uop.lrs2_rtype := RT_X
next_lrs2_rtype := RT_X
}
}
}
when (io.in_uop.valid) {
slot_uop := io.in_uop.bits
assert (is_invalid || io.clear || io.kill, "trying to overwrite a valid issue slot.")
}
// Wakeup Compare Logic
// these signals are the "next_p*" for the current slot's micro-op.
// they are important for shifting the current slot_uop up to another entry.
val next_p1 = WireInit(p1)
val next_p2 = WireInit(p2)
val next_p3 = WireInit(p3)
val next_ppred = WireInit(ppred)
when (io.in_uop.valid) {
p1 := !(io.in_uop.bits.prs1_busy)
p2 := !(io.in_uop.bits.prs2_busy)
p3 := !(io.in_uop.bits.prs3_busy)
ppred := !(io.in_uop.bits.ppred_busy)
}
when (io.ldspec_miss && next_p1_poisoned) {
assert(next_uop.prs1 =/= 0.U, "Poison bit can't be set for prs1=x0!")
p1 := false.B
}
when (io.ldspec_miss && next_p2_poisoned) {
assert(next_uop.prs2 =/= 0.U, "Poison bit can't be set for prs2=x0!")
p2 := false.B
}
for (i <- 0 until numWakeupPorts) {
when (io.wakeup_ports(i).valid &&
(io.wakeup_ports(i).bits.pdst === next_uop.prs1)) {
p1 := true.B
}
when (io.wakeup_ports(i).valid &&
(io.wakeup_ports(i).bits.pdst === next_uop.prs2)) {
p2 := true.B
}
when (io.wakeup_ports(i).valid &&
(io.wakeup_ports(i).bits.pdst === next_uop.prs3)) {
p3 := true.B
}
}
when (io.pred_wakeup_port.valid && io.pred_wakeup_port.bits === next_uop.ppred) {
ppred := true.B
}
for (w <- 0 until memWidth) {
assert (!(io.spec_ld_wakeup(w).valid && io.spec_ld_wakeup(w).bits === 0.U),
"Loads to x0 should never speculatively wakeup other instructions")
}
// TODO disable if FP IQ.
for (w <- 0 until memWidth) {
when (io.spec_ld_wakeup(w).valid &&
io.spec_ld_wakeup(w).bits === next_uop.prs1 &&
next_uop.lrs1_rtype === RT_FIX) {
p1 := true.B
p1_poisoned := true.B
assert (!next_p1_poisoned)
}
when (io.spec_ld_wakeup(w).valid &&
io.spec_ld_wakeup(w).bits === next_uop.prs2 &&
next_uop.lrs2_rtype === RT_FIX) {
p2 := true.B
p2_poisoned := true.B
assert (!next_p2_poisoned)
}
}
// Handle branch misspeculations
val next_br_mask = GetNewBrMask(io.brupdate, slot_uop)
// was this micro-op killed by a branch? if yes, we can't let it be valid if
// we compact it into another entry
when (IsKilledByBranch(io.brupdate, slot_uop)) {
next_state := s_invalid
}
when (!io.in_uop.valid) {
slot_uop.br_mask := next_br_mask
}
//-------------------------------------------------------------
// Request Logic
io.request := is_valid && p1 && p2 && p3 && ppred && !io.kill
val high_priority = slot_uop.is_br || slot_uop.is_jal || slot_uop.is_jalr
io.request_hp := io.request && high_priority
when (state === s_valid_1) {
io.request := p1 && p2 && p3 && ppred && !io.kill
} .elsewhen (state === s_valid_2) {
io.request := (p1 || p2) && ppred && !io.kill
} .otherwise {
io.request := false.B
}
//assign outputs
io.valid := is_valid
io.uop := slot_uop
io.uop.iw_p1_poisoned := p1_poisoned
io.uop.iw_p2_poisoned := p2_poisoned
// micro-op will vacate due to grant.
val may_vacate = io.grant && ((state === s_valid_1) || (state === s_valid_2) && p1 && p2 && ppred)
val squash_grant = io.ldspec_miss && (p1_poisoned || p2_poisoned)
io.will_be_valid := is_valid && !(may_vacate && !squash_grant)
io.out_uop := slot_uop
io.out_uop.iw_state := next_state
io.out_uop.uopc := next_uopc
io.out_uop.lrs1_rtype := next_lrs1_rtype
io.out_uop.lrs2_rtype := next_lrs2_rtype
io.out_uop.br_mask := next_br_mask
io.out_uop.prs1_busy := !p1
io.out_uop.prs2_busy := !p2
io.out_uop.prs3_busy := !p3
io.out_uop.ppred_busy := !ppred
io.out_uop.iw_p1_poisoned := p1_poisoned
io.out_uop.iw_p2_poisoned := p2_poisoned
when (state === s_valid_2) {
when (p1 && p2 && ppred) {
; // send out the entire instruction as one uop
} .elsewhen (p1 && ppred) {
io.uop.uopc := slot_uop.uopc
io.uop.lrs2_rtype := RT_X
} .elsewhen (p2 && ppred) {
io.uop.uopc := uopSTD
io.uop.lrs1_rtype := RT_X
}
}
// debug outputs
io.debug.p1 := p1
io.debug.p2 := p2
io.debug.p3 := p3
io.debug.ppred := ppred
io.debug.state := state
}
| module IssueSlot_29( // @[issue-slot.scala:69:7]
input clock, // @[issue-slot.scala:69:7]
input reset, // @[issue-slot.scala:69:7]
output io_valid, // @[issue-slot.scala:73:14]
output io_will_be_valid, // @[issue-slot.scala:73:14]
output io_request, // @[issue-slot.scala:73:14]
output io_request_hp, // @[issue-slot.scala:73:14]
input io_grant, // @[issue-slot.scala:73:14]
input [7:0] io_brupdate_b1_resolve_mask, // @[issue-slot.scala:73:14]
input [7:0] io_brupdate_b1_mispredict_mask, // @[issue-slot.scala:73:14]
input [6:0] io_brupdate_b2_uop_uopc, // @[issue-slot.scala:73:14]
input [31:0] io_brupdate_b2_uop_inst, // @[issue-slot.scala:73:14]
input [31:0] io_brupdate_b2_uop_debug_inst, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_is_rvc, // @[issue-slot.scala:73:14]
input [39:0] io_brupdate_b2_uop_debug_pc, // @[issue-slot.scala:73:14]
input [2:0] io_brupdate_b2_uop_iq_type, // @[issue-slot.scala:73:14]
input [9:0] io_brupdate_b2_uop_fu_code, // @[issue-slot.scala:73:14]
input [3:0] io_brupdate_b2_uop_ctrl_br_type, // @[issue-slot.scala:73:14]
input [1:0] io_brupdate_b2_uop_ctrl_op1_sel, // @[issue-slot.scala:73:14]
input [2:0] io_brupdate_b2_uop_ctrl_op2_sel, // @[issue-slot.scala:73:14]
input [2:0] io_brupdate_b2_uop_ctrl_imm_sel, // @[issue-slot.scala:73:14]
input [4:0] io_brupdate_b2_uop_ctrl_op_fcn, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_ctrl_fcn_dw, // @[issue-slot.scala:73:14]
input [2:0] io_brupdate_b2_uop_ctrl_csr_cmd, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_ctrl_is_load, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_ctrl_is_sta, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_ctrl_is_std, // @[issue-slot.scala:73:14]
input [1:0] io_brupdate_b2_uop_iw_state, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_iw_p1_poisoned, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_iw_p2_poisoned, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_is_br, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_is_jalr, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_is_jal, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_is_sfb, // @[issue-slot.scala:73:14]
input [7:0] io_brupdate_b2_uop_br_mask, // @[issue-slot.scala:73:14]
input [2:0] io_brupdate_b2_uop_br_tag, // @[issue-slot.scala:73:14]
input [3:0] io_brupdate_b2_uop_ftq_idx, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_edge_inst, // @[issue-slot.scala:73:14]
input [5:0] io_brupdate_b2_uop_pc_lob, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_taken, // @[issue-slot.scala:73:14]
input [19:0] io_brupdate_b2_uop_imm_packed, // @[issue-slot.scala:73:14]
input [11:0] io_brupdate_b2_uop_csr_addr, // @[issue-slot.scala:73:14]
input [4:0] io_brupdate_b2_uop_rob_idx, // @[issue-slot.scala:73:14]
input [2:0] io_brupdate_b2_uop_ldq_idx, // @[issue-slot.scala:73:14]
input [2:0] io_brupdate_b2_uop_stq_idx, // @[issue-slot.scala:73:14]
input [1:0] io_brupdate_b2_uop_rxq_idx, // @[issue-slot.scala:73:14]
input [5:0] io_brupdate_b2_uop_pdst, // @[issue-slot.scala:73:14]
input [5:0] io_brupdate_b2_uop_prs1, // @[issue-slot.scala:73:14]
input [5:0] io_brupdate_b2_uop_prs2, // @[issue-slot.scala:73:14]
input [5:0] io_brupdate_b2_uop_prs3, // @[issue-slot.scala:73:14]
input [3:0] io_brupdate_b2_uop_ppred, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_prs1_busy, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_prs2_busy, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_prs3_busy, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_ppred_busy, // @[issue-slot.scala:73:14]
input [5:0] io_brupdate_b2_uop_stale_pdst, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_exception, // @[issue-slot.scala:73:14]
input [63:0] io_brupdate_b2_uop_exc_cause, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_bypassable, // @[issue-slot.scala:73:14]
input [4:0] io_brupdate_b2_uop_mem_cmd, // @[issue-slot.scala:73:14]
input [1:0] io_brupdate_b2_uop_mem_size, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_mem_signed, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_is_fence, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_is_fencei, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_is_amo, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_uses_ldq, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_uses_stq, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_is_sys_pc2epc, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_is_unique, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_flush_on_commit, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_ldst_is_rs1, // @[issue-slot.scala:73:14]
input [5:0] io_brupdate_b2_uop_ldst, // @[issue-slot.scala:73:14]
input [5:0] io_brupdate_b2_uop_lrs1, // @[issue-slot.scala:73:14]
input [5:0] io_brupdate_b2_uop_lrs2, // @[issue-slot.scala:73:14]
input [5:0] io_brupdate_b2_uop_lrs3, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_ldst_val, // @[issue-slot.scala:73:14]
input [1:0] io_brupdate_b2_uop_dst_rtype, // @[issue-slot.scala:73:14]
input [1:0] io_brupdate_b2_uop_lrs1_rtype, // @[issue-slot.scala:73:14]
input [1:0] io_brupdate_b2_uop_lrs2_rtype, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_frs3_en, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_fp_val, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_fp_single, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_xcpt_pf_if, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_xcpt_ae_if, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_xcpt_ma_if, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_bp_debug_if, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_bp_xcpt_if, // @[issue-slot.scala:73:14]
input [1:0] io_brupdate_b2_uop_debug_fsrc, // @[issue-slot.scala:73:14]
input [1:0] io_brupdate_b2_uop_debug_tsrc, // @[issue-slot.scala:73:14]
input io_brupdate_b2_valid, // @[issue-slot.scala:73:14]
input io_brupdate_b2_mispredict, // @[issue-slot.scala:73:14]
input io_brupdate_b2_taken, // @[issue-slot.scala:73:14]
input [2:0] io_brupdate_b2_cfi_type, // @[issue-slot.scala:73:14]
input [1:0] io_brupdate_b2_pc_sel, // @[issue-slot.scala:73:14]
input [39:0] io_brupdate_b2_jalr_target, // @[issue-slot.scala:73:14]
input [20:0] io_brupdate_b2_target_offset, // @[issue-slot.scala:73:14]
input io_kill, // @[issue-slot.scala:73:14]
input io_clear, // @[issue-slot.scala:73:14]
input io_wakeup_ports_0_valid, // @[issue-slot.scala:73:14]
input [5:0] io_wakeup_ports_0_bits_pdst, // @[issue-slot.scala:73:14]
input io_wakeup_ports_1_valid, // @[issue-slot.scala:73:14]
input [5:0] io_wakeup_ports_1_bits_pdst, // @[issue-slot.scala:73:14]
input io_in_uop_valid, // @[issue-slot.scala:73:14]
input [6:0] io_in_uop_bits_uopc, // @[issue-slot.scala:73:14]
input [31:0] io_in_uop_bits_inst, // @[issue-slot.scala:73:14]
input [31:0] io_in_uop_bits_debug_inst, // @[issue-slot.scala:73:14]
input io_in_uop_bits_is_rvc, // @[issue-slot.scala:73:14]
input [39:0] io_in_uop_bits_debug_pc, // @[issue-slot.scala:73:14]
input [2:0] io_in_uop_bits_iq_type, // @[issue-slot.scala:73:14]
input [9:0] io_in_uop_bits_fu_code, // @[issue-slot.scala:73:14]
input [3:0] io_in_uop_bits_ctrl_br_type, // @[issue-slot.scala:73:14]
input [1:0] io_in_uop_bits_ctrl_op1_sel, // @[issue-slot.scala:73:14]
input [2:0] io_in_uop_bits_ctrl_op2_sel, // @[issue-slot.scala:73:14]
input [2:0] io_in_uop_bits_ctrl_imm_sel, // @[issue-slot.scala:73:14]
input [4:0] io_in_uop_bits_ctrl_op_fcn, // @[issue-slot.scala:73:14]
input io_in_uop_bits_ctrl_fcn_dw, // @[issue-slot.scala:73:14]
input [2:0] io_in_uop_bits_ctrl_csr_cmd, // @[issue-slot.scala:73:14]
input io_in_uop_bits_ctrl_is_load, // @[issue-slot.scala:73:14]
input io_in_uop_bits_ctrl_is_sta, // @[issue-slot.scala:73:14]
input io_in_uop_bits_ctrl_is_std, // @[issue-slot.scala:73:14]
input [1:0] io_in_uop_bits_iw_state, // @[issue-slot.scala:73:14]
input io_in_uop_bits_is_br, // @[issue-slot.scala:73:14]
input io_in_uop_bits_is_jalr, // @[issue-slot.scala:73:14]
input io_in_uop_bits_is_jal, // @[issue-slot.scala:73:14]
input io_in_uop_bits_is_sfb, // @[issue-slot.scala:73:14]
input [7:0] io_in_uop_bits_br_mask, // @[issue-slot.scala:73:14]
input [2:0] io_in_uop_bits_br_tag, // @[issue-slot.scala:73:14]
input [3:0] io_in_uop_bits_ftq_idx, // @[issue-slot.scala:73:14]
input io_in_uop_bits_edge_inst, // @[issue-slot.scala:73:14]
input [5:0] io_in_uop_bits_pc_lob, // @[issue-slot.scala:73:14]
input io_in_uop_bits_taken, // @[issue-slot.scala:73:14]
input [19:0] io_in_uop_bits_imm_packed, // @[issue-slot.scala:73:14]
input [11:0] io_in_uop_bits_csr_addr, // @[issue-slot.scala:73:14]
input [4:0] io_in_uop_bits_rob_idx, // @[issue-slot.scala:73:14]
input [2:0] io_in_uop_bits_ldq_idx, // @[issue-slot.scala:73:14]
input [2:0] io_in_uop_bits_stq_idx, // @[issue-slot.scala:73:14]
input [1:0] io_in_uop_bits_rxq_idx, // @[issue-slot.scala:73:14]
input [5:0] io_in_uop_bits_pdst, // @[issue-slot.scala:73:14]
input [5:0] io_in_uop_bits_prs1, // @[issue-slot.scala:73:14]
input [5:0] io_in_uop_bits_prs2, // @[issue-slot.scala:73:14]
input [5:0] io_in_uop_bits_prs3, // @[issue-slot.scala:73:14]
input [3:0] io_in_uop_bits_ppred, // @[issue-slot.scala:73:14]
input io_in_uop_bits_prs1_busy, // @[issue-slot.scala:73:14]
input io_in_uop_bits_prs2_busy, // @[issue-slot.scala:73:14]
input io_in_uop_bits_prs3_busy, // @[issue-slot.scala:73:14]
input io_in_uop_bits_ppred_busy, // @[issue-slot.scala:73:14]
input [5:0] io_in_uop_bits_stale_pdst, // @[issue-slot.scala:73:14]
input io_in_uop_bits_exception, // @[issue-slot.scala:73:14]
input [63:0] io_in_uop_bits_exc_cause, // @[issue-slot.scala:73:14]
input io_in_uop_bits_bypassable, // @[issue-slot.scala:73:14]
input [4:0] io_in_uop_bits_mem_cmd, // @[issue-slot.scala:73:14]
input [1:0] io_in_uop_bits_mem_size, // @[issue-slot.scala:73:14]
input io_in_uop_bits_mem_signed, // @[issue-slot.scala:73:14]
input io_in_uop_bits_is_fence, // @[issue-slot.scala:73:14]
input io_in_uop_bits_is_fencei, // @[issue-slot.scala:73:14]
input io_in_uop_bits_is_amo, // @[issue-slot.scala:73:14]
input io_in_uop_bits_uses_ldq, // @[issue-slot.scala:73:14]
input io_in_uop_bits_uses_stq, // @[issue-slot.scala:73:14]
input io_in_uop_bits_is_sys_pc2epc, // @[issue-slot.scala:73:14]
input io_in_uop_bits_is_unique, // @[issue-slot.scala:73:14]
input io_in_uop_bits_flush_on_commit, // @[issue-slot.scala:73:14]
input io_in_uop_bits_ldst_is_rs1, // @[issue-slot.scala:73:14]
input [5:0] io_in_uop_bits_ldst, // @[issue-slot.scala:73:14]
input [5:0] io_in_uop_bits_lrs1, // @[issue-slot.scala:73:14]
input [5:0] io_in_uop_bits_lrs2, // @[issue-slot.scala:73:14]
input [5:0] io_in_uop_bits_lrs3, // @[issue-slot.scala:73:14]
input io_in_uop_bits_ldst_val, // @[issue-slot.scala:73:14]
input [1:0] io_in_uop_bits_dst_rtype, // @[issue-slot.scala:73:14]
input [1:0] io_in_uop_bits_lrs1_rtype, // @[issue-slot.scala:73:14]
input [1:0] io_in_uop_bits_lrs2_rtype, // @[issue-slot.scala:73:14]
input io_in_uop_bits_frs3_en, // @[issue-slot.scala:73:14]
input io_in_uop_bits_fp_val, // @[issue-slot.scala:73:14]
input io_in_uop_bits_fp_single, // @[issue-slot.scala:73:14]
input io_in_uop_bits_xcpt_pf_if, // @[issue-slot.scala:73:14]
input io_in_uop_bits_xcpt_ae_if, // @[issue-slot.scala:73:14]
input io_in_uop_bits_xcpt_ma_if, // @[issue-slot.scala:73:14]
input io_in_uop_bits_bp_debug_if, // @[issue-slot.scala:73:14]
input io_in_uop_bits_bp_xcpt_if, // @[issue-slot.scala:73:14]
input [1:0] io_in_uop_bits_debug_fsrc, // @[issue-slot.scala:73:14]
input [1:0] io_in_uop_bits_debug_tsrc, // @[issue-slot.scala:73:14]
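  // io_out_uop: the micro-op currently held in this slot, driven out (with any
  // pending state/readiness updates applied) so the issue queue can relocate it
  // to another slot.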
output [6:0] io_out_uop_uopc, // @[issue-slot.scala:73:14]
output [31:0] io_out_uop_inst, // @[issue-slot.scala:73:14]
output [31:0] io_out_uop_debug_inst, // @[issue-slot.scala:73:14]
output io_out_uop_is_rvc, // @[issue-slot.scala:73:14]
output [39:0] io_out_uop_debug_pc, // @[issue-slot.scala:73:14]
output [2:0] io_out_uop_iq_type, // @[issue-slot.scala:73:14]
output [9:0] io_out_uop_fu_code, // @[issue-slot.scala:73:14]
output [3:0] io_out_uop_ctrl_br_type, // @[issue-slot.scala:73:14]
output [1:0] io_out_uop_ctrl_op1_sel, // @[issue-slot.scala:73:14]
output [2:0] io_out_uop_ctrl_op2_sel, // @[issue-slot.scala:73:14]
output [2:0] io_out_uop_ctrl_imm_sel, // @[issue-slot.scala:73:14]
output [4:0] io_out_uop_ctrl_op_fcn, // @[issue-slot.scala:73:14]
output io_out_uop_ctrl_fcn_dw, // @[issue-slot.scala:73:14]
output [2:0] io_out_uop_ctrl_csr_cmd, // @[issue-slot.scala:73:14]
output io_out_uop_ctrl_is_load, // @[issue-slot.scala:73:14]
output io_out_uop_ctrl_is_sta, // @[issue-slot.scala:73:14]
output io_out_uop_ctrl_is_std, // @[issue-slot.scala:73:14]
output [1:0] io_out_uop_iw_state, // @[issue-slot.scala:73:14]
output io_out_uop_is_br, // @[issue-slot.scala:73:14]
output io_out_uop_is_jalr, // @[issue-slot.scala:73:14]
output io_out_uop_is_jal, // @[issue-slot.scala:73:14]
output io_out_uop_is_sfb, // @[issue-slot.scala:73:14]
output [7:0] io_out_uop_br_mask, // @[issue-slot.scala:73:14]
output [2:0] io_out_uop_br_tag, // @[issue-slot.scala:73:14]
output [3:0] io_out_uop_ftq_idx, // @[issue-slot.scala:73:14]
output io_out_uop_edge_inst, // @[issue-slot.scala:73:14]
output [5:0] io_out_uop_pc_lob, // @[issue-slot.scala:73:14]
output io_out_uop_taken, // @[issue-slot.scala:73:14]
output [19:0] io_out_uop_imm_packed, // @[issue-slot.scala:73:14]
output [11:0] io_out_uop_csr_addr, // @[issue-slot.scala:73:14]
output [4:0] io_out_uop_rob_idx, // @[issue-slot.scala:73:14]
output [2:0] io_out_uop_ldq_idx, // @[issue-slot.scala:73:14]
output [2:0] io_out_uop_stq_idx, // @[issue-slot.scala:73:14]
output [1:0] io_out_uop_rxq_idx, // @[issue-slot.scala:73:14]
output [5:0] io_out_uop_pdst, // @[issue-slot.scala:73:14]
output [5:0] io_out_uop_prs1, // @[issue-slot.scala:73:14]
output [5:0] io_out_uop_prs2, // @[issue-slot.scala:73:14]
output [5:0] io_out_uop_prs3, // @[issue-slot.scala:73:14]
output [3:0] io_out_uop_ppred, // @[issue-slot.scala:73:14]
output io_out_uop_prs1_busy, // @[issue-slot.scala:73:14]
output io_out_uop_prs2_busy, // @[issue-slot.scala:73:14]
output io_out_uop_prs3_busy, // @[issue-slot.scala:73:14]
output io_out_uop_ppred_busy, // @[issue-slot.scala:73:14]
output [5:0] io_out_uop_stale_pdst, // @[issue-slot.scala:73:14]
output io_out_uop_exception, // @[issue-slot.scala:73:14]
output [63:0] io_out_uop_exc_cause, // @[issue-slot.scala:73:14]
output io_out_uop_bypassable, // @[issue-slot.scala:73:14]
output [4:0] io_out_uop_mem_cmd, // @[issue-slot.scala:73:14]
output [1:0] io_out_uop_mem_size, // @[issue-slot.scala:73:14]
output io_out_uop_mem_signed, // @[issue-slot.scala:73:14]
output io_out_uop_is_fence, // @[issue-slot.scala:73:14]
output io_out_uop_is_fencei, // @[issue-slot.scala:73:14]
output io_out_uop_is_amo, // @[issue-slot.scala:73:14]
output io_out_uop_uses_ldq, // @[issue-slot.scala:73:14]
output io_out_uop_uses_stq, // @[issue-slot.scala:73:14]
output io_out_uop_is_sys_pc2epc, // @[issue-slot.scala:73:14]
output io_out_uop_is_unique, // @[issue-slot.scala:73:14]
output io_out_uop_flush_on_commit, // @[issue-slot.scala:73:14]
output io_out_uop_ldst_is_rs1, // @[issue-slot.scala:73:14]
output [5:0] io_out_uop_ldst, // @[issue-slot.scala:73:14]
output [5:0] io_out_uop_lrs1, // @[issue-slot.scala:73:14]
output [5:0] io_out_uop_lrs2, // @[issue-slot.scala:73:14]
output [5:0] io_out_uop_lrs3, // @[issue-slot.scala:73:14]
output io_out_uop_ldst_val, // @[issue-slot.scala:73:14]
output [1:0] io_out_uop_dst_rtype, // @[issue-slot.scala:73:14]
output [1:0] io_out_uop_lrs1_rtype, // @[issue-slot.scala:73:14]
output [1:0] io_out_uop_lrs2_rtype, // @[issue-slot.scala:73:14]
output io_out_uop_frs3_en, // @[issue-slot.scala:73:14]
output io_out_uop_fp_val, // @[issue-slot.scala:73:14]
output io_out_uop_fp_single, // @[issue-slot.scala:73:14]
output io_out_uop_xcpt_pf_if, // @[issue-slot.scala:73:14]
output io_out_uop_xcpt_ae_if, // @[issue-slot.scala:73:14]
output io_out_uop_xcpt_ma_if, // @[issue-slot.scala:73:14]
output io_out_uop_bp_debug_if, // @[issue-slot.scala:73:14]
output io_out_uop_bp_xcpt_if, // @[issue-slot.scala:73:14]
output [1:0] io_out_uop_debug_fsrc, // @[issue-slot.scala:73:14]
output [1:0] io_out_uop_debug_tsrc, // @[issue-slot.scala:73:14]
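  // io_uop: the micro-op this slot presents for issue selection.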
output [6:0] io_uop_uopc, // @[issue-slot.scala:73:14]
output [31:0] io_uop_inst, // @[issue-slot.scala:73:14]
output [31:0] io_uop_debug_inst, // @[issue-slot.scala:73:14]
output io_uop_is_rvc, // @[issue-slot.scala:73:14]
output [39:0] io_uop_debug_pc, // @[issue-slot.scala:73:14]
output [2:0] io_uop_iq_type, // @[issue-slot.scala:73:14]
output [9:0] io_uop_fu_code, // @[issue-slot.scala:73:14]
output [3:0] io_uop_ctrl_br_type, // @[issue-slot.scala:73:14]
output [1:0] io_uop_ctrl_op1_sel, // @[issue-slot.scala:73:14]
output [2:0] io_uop_ctrl_op2_sel, // @[issue-slot.scala:73:14]
output [2:0] io_uop_ctrl_imm_sel, // @[issue-slot.scala:73:14]
output [4:0] io_uop_ctrl_op_fcn, // @[issue-slot.scala:73:14]
output io_uop_ctrl_fcn_dw, // @[issue-slot.scala:73:14]
output [2:0] io_uop_ctrl_csr_cmd, // @[issue-slot.scala:73:14]
output io_uop_ctrl_is_load, // @[issue-slot.scala:73:14]
output io_uop_ctrl_is_sta, // @[issue-slot.scala:73:14]
output io_uop_ctrl_is_std, // @[issue-slot.scala:73:14]
output [1:0] io_uop_iw_state, // @[issue-slot.scala:73:14]
output io_uop_is_br, // @[issue-slot.scala:73:14]
output io_uop_is_jalr, // @[issue-slot.scala:73:14]
output io_uop_is_jal, // @[issue-slot.scala:73:14]
output io_uop_is_sfb, // @[issue-slot.scala:73:14]
output [7:0] io_uop_br_mask, // @[issue-slot.scala:73:14]
output [2:0] io_uop_br_tag, // @[issue-slot.scala:73:14]
output [3:0] io_uop_ftq_idx, // @[issue-slot.scala:73:14]
output io_uop_edge_inst, // @[issue-slot.scala:73:14]
output [5:0] io_uop_pc_lob, // @[issue-slot.scala:73:14]
output io_uop_taken, // @[issue-slot.scala:73:14]
output [19:0] io_uop_imm_packed, // @[issue-slot.scala:73:14]
output [11:0] io_uop_csr_addr, // @[issue-slot.scala:73:14]
output [4:0] io_uop_rob_idx, // @[issue-slot.scala:73:14]
output [2:0] io_uop_ldq_idx, // @[issue-slot.scala:73:14]
output [2:0] io_uop_stq_idx, // @[issue-slot.scala:73:14]
output [1:0] io_uop_rxq_idx, // @[issue-slot.scala:73:14]
output [5:0] io_uop_pdst, // @[issue-slot.scala:73:14]
output [5:0] io_uop_prs1, // @[issue-slot.scala:73:14]
output [5:0] io_uop_prs2, // @[issue-slot.scala:73:14]
output [5:0] io_uop_prs3, // @[issue-slot.scala:73:14]
output [3:0] io_uop_ppred, // @[issue-slot.scala:73:14]
output io_uop_prs1_busy, // @[issue-slot.scala:73:14]
output io_uop_prs2_busy, // @[issue-slot.scala:73:14]
output io_uop_prs3_busy, // @[issue-slot.scala:73:14]
output io_uop_ppred_busy, // @[issue-slot.scala:73:14]
output [5:0] io_uop_stale_pdst, // @[issue-slot.scala:73:14]
output io_uop_exception, // @[issue-slot.scala:73:14]
output [63:0] io_uop_exc_cause, // @[issue-slot.scala:73:14]
output io_uop_bypassable, // @[issue-slot.scala:73:14]
output [4:0] io_uop_mem_cmd, // @[issue-slot.scala:73:14]
output [1:0] io_uop_mem_size, // @[issue-slot.scala:73:14]
output io_uop_mem_signed, // @[issue-slot.scala:73:14]
output io_uop_is_fence, // @[issue-slot.scala:73:14]
output io_uop_is_fencei, // @[issue-slot.scala:73:14]
output io_uop_is_amo, // @[issue-slot.scala:73:14]
output io_uop_uses_ldq, // @[issue-slot.scala:73:14]
output io_uop_uses_stq, // @[issue-slot.scala:73:14]
output io_uop_is_sys_pc2epc, // @[issue-slot.scala:73:14]
output io_uop_is_unique, // @[issue-slot.scala:73:14]
output io_uop_flush_on_commit, // @[issue-slot.scala:73:14]
output io_uop_ldst_is_rs1, // @[issue-slot.scala:73:14]
output [5:0] io_uop_ldst, // @[issue-slot.scala:73:14]
output [5:0] io_uop_lrs1, // @[issue-slot.scala:73:14]
output [5:0] io_uop_lrs2, // @[issue-slot.scala:73:14]
output [5:0] io_uop_lrs3, // @[issue-slot.scala:73:14]
output io_uop_ldst_val, // @[issue-slot.scala:73:14]
output [1:0] io_uop_dst_rtype, // @[issue-slot.scala:73:14]
output [1:0] io_uop_lrs1_rtype, // @[issue-slot.scala:73:14]
output [1:0] io_uop_lrs2_rtype, // @[issue-slot.scala:73:14]
output io_uop_frs3_en, // @[issue-slot.scala:73:14]
output io_uop_fp_val, // @[issue-slot.scala:73:14]
output io_uop_fp_single, // @[issue-slot.scala:73:14]
output io_uop_xcpt_pf_if, // @[issue-slot.scala:73:14]
output io_uop_xcpt_ae_if, // @[issue-slot.scala:73:14]
output io_uop_xcpt_ma_if, // @[issue-slot.scala:73:14]
output io_uop_bp_debug_if, // @[issue-slot.scala:73:14]
output io_uop_bp_xcpt_if, // @[issue-slot.scala:73:14]
output [1:0] io_uop_debug_fsrc, // @[issue-slot.scala:73:14]
output [1:0] io_uop_debug_tsrc, // @[issue-slot.scala:73:14]
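  // Debug visibility into the slot's operand-ready bits and state machine.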
output io_debug_p1, // @[issue-slot.scala:73:14]
output io_debug_p2, // @[issue-slot.scala:73:14]
output io_debug_p3, // @[issue-slot.scala:73:14]
output io_debug_ppred, // @[issue-slot.scala:73:14]
output [1:0] io_debug_state // @[issue-slot.scala:73:14]
);
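  // The "_0"-suffixed wires below are generated pass-through copies of the
  // module inputs, produced when the Chisel IO bundle is flattened to Verilog.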
wire io_grant_0 = io_grant; // @[issue-slot.scala:69:7]
wire [7:0] io_brupdate_b1_resolve_mask_0 = io_brupdate_b1_resolve_mask; // @[issue-slot.scala:69:7]
wire [7:0] io_brupdate_b1_mispredict_mask_0 = io_brupdate_b1_mispredict_mask; // @[issue-slot.scala:69:7]
wire [6:0] io_brupdate_b2_uop_uopc_0 = io_brupdate_b2_uop_uopc; // @[issue-slot.scala:69:7]
wire [31:0] io_brupdate_b2_uop_inst_0 = io_brupdate_b2_uop_inst; // @[issue-slot.scala:69:7]
wire [31:0] io_brupdate_b2_uop_debug_inst_0 = io_brupdate_b2_uop_debug_inst; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_is_rvc_0 = io_brupdate_b2_uop_is_rvc; // @[issue-slot.scala:69:7]
wire [39:0] io_brupdate_b2_uop_debug_pc_0 = io_brupdate_b2_uop_debug_pc; // @[issue-slot.scala:69:7]
wire [2:0] io_brupdate_b2_uop_iq_type_0 = io_brupdate_b2_uop_iq_type; // @[issue-slot.scala:69:7]
wire [9:0] io_brupdate_b2_uop_fu_code_0 = io_brupdate_b2_uop_fu_code; // @[issue-slot.scala:69:7]
wire [3:0] io_brupdate_b2_uop_ctrl_br_type_0 = io_brupdate_b2_uop_ctrl_br_type; // @[issue-slot.scala:69:7]
wire [1:0] io_brupdate_b2_uop_ctrl_op1_sel_0 = io_brupdate_b2_uop_ctrl_op1_sel; // @[issue-slot.scala:69:7]
wire [2:0] io_brupdate_b2_uop_ctrl_op2_sel_0 = io_brupdate_b2_uop_ctrl_op2_sel; // @[issue-slot.scala:69:7]
wire [2:0] io_brupdate_b2_uop_ctrl_imm_sel_0 = io_brupdate_b2_uop_ctrl_imm_sel; // @[issue-slot.scala:69:7]
wire [4:0] io_brupdate_b2_uop_ctrl_op_fcn_0 = io_brupdate_b2_uop_ctrl_op_fcn; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_ctrl_fcn_dw_0 = io_brupdate_b2_uop_ctrl_fcn_dw; // @[issue-slot.scala:69:7]
wire [2:0] io_brupdate_b2_uop_ctrl_csr_cmd_0 = io_brupdate_b2_uop_ctrl_csr_cmd; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_ctrl_is_load_0 = io_brupdate_b2_uop_ctrl_is_load; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_ctrl_is_sta_0 = io_brupdate_b2_uop_ctrl_is_sta; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_ctrl_is_std_0 = io_brupdate_b2_uop_ctrl_is_std; // @[issue-slot.scala:69:7]
wire [1:0] io_brupdate_b2_uop_iw_state_0 = io_brupdate_b2_uop_iw_state; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_iw_p1_poisoned_0 = io_brupdate_b2_uop_iw_p1_poisoned; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_iw_p2_poisoned_0 = io_brupdate_b2_uop_iw_p2_poisoned; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_is_br_0 = io_brupdate_b2_uop_is_br; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_is_jalr_0 = io_brupdate_b2_uop_is_jalr; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_is_jal_0 = io_brupdate_b2_uop_is_jal; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_is_sfb_0 = io_brupdate_b2_uop_is_sfb; // @[issue-slot.scala:69:7]
wire [7:0] io_brupdate_b2_uop_br_mask_0 = io_brupdate_b2_uop_br_mask; // @[issue-slot.scala:69:7]
wire [2:0] io_brupdate_b2_uop_br_tag_0 = io_brupdate_b2_uop_br_tag; // @[issue-slot.scala:69:7]
wire [3:0] io_brupdate_b2_uop_ftq_idx_0 = io_brupdate_b2_uop_ftq_idx; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_edge_inst_0 = io_brupdate_b2_uop_edge_inst; // @[issue-slot.scala:69:7]
wire [5:0] io_brupdate_b2_uop_pc_lob_0 = io_brupdate_b2_uop_pc_lob; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_taken_0 = io_brupdate_b2_uop_taken; // @[issue-slot.scala:69:7]
wire [19:0] io_brupdate_b2_uop_imm_packed_0 = io_brupdate_b2_uop_imm_packed; // @[issue-slot.scala:69:7]
wire [11:0] io_brupdate_b2_uop_csr_addr_0 = io_brupdate_b2_uop_csr_addr; // @[issue-slot.scala:69:7]
wire [4:0] io_brupdate_b2_uop_rob_idx_0 = io_brupdate_b2_uop_rob_idx; // @[issue-slot.scala:69:7]
wire [2:0] io_brupdate_b2_uop_ldq_idx_0 = io_brupdate_b2_uop_ldq_idx; // @[issue-slot.scala:69:7]
wire [2:0] io_brupdate_b2_uop_stq_idx_0 = io_brupdate_b2_uop_stq_idx; // @[issue-slot.scala:69:7]
wire [1:0] io_brupdate_b2_uop_rxq_idx_0 = io_brupdate_b2_uop_rxq_idx; // @[issue-slot.scala:69:7]
wire [5:0] io_brupdate_b2_uop_pdst_0 = io_brupdate_b2_uop_pdst; // @[issue-slot.scala:69:7]
wire [5:0] io_brupdate_b2_uop_prs1_0 = io_brupdate_b2_uop_prs1; // @[issue-slot.scala:69:7]
wire [5:0] io_brupdate_b2_uop_prs2_0 = io_brupdate_b2_uop_prs2; // @[issue-slot.scala:69:7]
wire [5:0] io_brupdate_b2_uop_prs3_0 = io_brupdate_b2_uop_prs3; // @[issue-slot.scala:69:7]
wire [3:0] io_brupdate_b2_uop_ppred_0 = io_brupdate_b2_uop_ppred; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_prs1_busy_0 = io_brupdate_b2_uop_prs1_busy; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_prs2_busy_0 = io_brupdate_b2_uop_prs2_busy; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_prs3_busy_0 = io_brupdate_b2_uop_prs3_busy; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_ppred_busy_0 = io_brupdate_b2_uop_ppred_busy; // @[issue-slot.scala:69:7]
wire [5:0] io_brupdate_b2_uop_stale_pdst_0 = io_brupdate_b2_uop_stale_pdst; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_exception_0 = io_brupdate_b2_uop_exception; // @[issue-slot.scala:69:7]
wire [63:0] io_brupdate_b2_uop_exc_cause_0 = io_brupdate_b2_uop_exc_cause; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_bypassable_0 = io_brupdate_b2_uop_bypassable; // @[issue-slot.scala:69:7]
wire [4:0] io_brupdate_b2_uop_mem_cmd_0 = io_brupdate_b2_uop_mem_cmd; // @[issue-slot.scala:69:7]
wire [1:0] io_brupdate_b2_uop_mem_size_0 = io_brupdate_b2_uop_mem_size; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_mem_signed_0 = io_brupdate_b2_uop_mem_signed; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_is_fence_0 = io_brupdate_b2_uop_is_fence; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_is_fencei_0 = io_brupdate_b2_uop_is_fencei; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_is_amo_0 = io_brupdate_b2_uop_is_amo; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_uses_ldq_0 = io_brupdate_b2_uop_uses_ldq; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_uses_stq_0 = io_brupdate_b2_uop_uses_stq; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_is_sys_pc2epc_0 = io_brupdate_b2_uop_is_sys_pc2epc; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_is_unique_0 = io_brupdate_b2_uop_is_unique; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_flush_on_commit_0 = io_brupdate_b2_uop_flush_on_commit; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_ldst_is_rs1_0 = io_brupdate_b2_uop_ldst_is_rs1; // @[issue-slot.scala:69:7]
wire [5:0] io_brupdate_b2_uop_ldst_0 = io_brupdate_b2_uop_ldst; // @[issue-slot.scala:69:7]
wire [5:0] io_brupdate_b2_uop_lrs1_0 = io_brupdate_b2_uop_lrs1; // @[issue-slot.scala:69:7]
wire [5:0] io_brupdate_b2_uop_lrs2_0 = io_brupdate_b2_uop_lrs2; // @[issue-slot.scala:69:7]
wire [5:0] io_brupdate_b2_uop_lrs3_0 = io_brupdate_b2_uop_lrs3; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_ldst_val_0 = io_brupdate_b2_uop_ldst_val; // @[issue-slot.scala:69:7]
wire [1:0] io_brupdate_b2_uop_dst_rtype_0 = io_brupdate_b2_uop_dst_rtype; // @[issue-slot.scala:69:7]
wire [1:0] io_brupdate_b2_uop_lrs1_rtype_0 = io_brupdate_b2_uop_lrs1_rtype; // @[issue-slot.scala:69:7]
wire [1:0] io_brupdate_b2_uop_lrs2_rtype_0 = io_brupdate_b2_uop_lrs2_rtype; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_frs3_en_0 = io_brupdate_b2_uop_frs3_en; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_fp_val_0 = io_brupdate_b2_uop_fp_val; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_fp_single_0 = io_brupdate_b2_uop_fp_single; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_xcpt_pf_if_0 = io_brupdate_b2_uop_xcpt_pf_if; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_xcpt_ae_if_0 = io_brupdate_b2_uop_xcpt_ae_if; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_xcpt_ma_if_0 = io_brupdate_b2_uop_xcpt_ma_if; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_bp_debug_if_0 = io_brupdate_b2_uop_bp_debug_if; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_bp_xcpt_if_0 = io_brupdate_b2_uop_bp_xcpt_if; // @[issue-slot.scala:69:7]
wire [1:0] io_brupdate_b2_uop_debug_fsrc_0 = io_brupdate_b2_uop_debug_fsrc; // @[issue-slot.scala:69:7]
wire [1:0] io_brupdate_b2_uop_debug_tsrc_0 = io_brupdate_b2_uop_debug_tsrc; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_valid_0 = io_brupdate_b2_valid; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_mispredict_0 = io_brupdate_b2_mispredict; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_taken_0 = io_brupdate_b2_taken; // @[issue-slot.scala:69:7]
wire [2:0] io_brupdate_b2_cfi_type_0 = io_brupdate_b2_cfi_type; // @[issue-slot.scala:69:7]
wire [1:0] io_brupdate_b2_pc_sel_0 = io_brupdate_b2_pc_sel; // @[issue-slot.scala:69:7]
wire [39:0] io_brupdate_b2_jalr_target_0 = io_brupdate_b2_jalr_target; // @[issue-slot.scala:69:7]
wire [20:0] io_brupdate_b2_target_offset_0 = io_brupdate_b2_target_offset; // @[issue-slot.scala:69:7]
wire io_kill_0 = io_kill; // @[issue-slot.scala:69:7]
wire io_clear_0 = io_clear; // @[issue-slot.scala:69:7]
wire io_wakeup_ports_0_valid_0 = io_wakeup_ports_0_valid; // @[issue-slot.scala:69:7]
wire [5:0] io_wakeup_ports_0_bits_pdst_0 = io_wakeup_ports_0_bits_pdst; // @[issue-slot.scala:69:7]
wire io_wakeup_ports_1_valid_0 = io_wakeup_ports_1_valid; // @[issue-slot.scala:69:7]
wire [5:0] io_wakeup_ports_1_bits_pdst_0 = io_wakeup_ports_1_bits_pdst; // @[issue-slot.scala:69:7]
wire io_in_uop_valid_0 = io_in_uop_valid; // @[issue-slot.scala:69:7]
wire [6:0] io_in_uop_bits_uopc_0 = io_in_uop_bits_uopc; // @[issue-slot.scala:69:7]
wire [31:0] io_in_uop_bits_inst_0 = io_in_uop_bits_inst; // @[issue-slot.scala:69:7]
wire [31:0] io_in_uop_bits_debug_inst_0 = io_in_uop_bits_debug_inst; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_is_rvc_0 = io_in_uop_bits_is_rvc; // @[issue-slot.scala:69:7]
wire [39:0] io_in_uop_bits_debug_pc_0 = io_in_uop_bits_debug_pc; // @[issue-slot.scala:69:7]
wire [2:0] io_in_uop_bits_iq_type_0 = io_in_uop_bits_iq_type; // @[issue-slot.scala:69:7]
wire [9:0] io_in_uop_bits_fu_code_0 = io_in_uop_bits_fu_code; // @[issue-slot.scala:69:7]
wire [3:0] io_in_uop_bits_ctrl_br_type_0 = io_in_uop_bits_ctrl_br_type; // @[issue-slot.scala:69:7]
wire [1:0] io_in_uop_bits_ctrl_op1_sel_0 = io_in_uop_bits_ctrl_op1_sel; // @[issue-slot.scala:69:7]
wire [2:0] io_in_uop_bits_ctrl_op2_sel_0 = io_in_uop_bits_ctrl_op2_sel; // @[issue-slot.scala:69:7]
wire [2:0] io_in_uop_bits_ctrl_imm_sel_0 = io_in_uop_bits_ctrl_imm_sel; // @[issue-slot.scala:69:7]
wire [4:0] io_in_uop_bits_ctrl_op_fcn_0 = io_in_uop_bits_ctrl_op_fcn; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_ctrl_fcn_dw_0 = io_in_uop_bits_ctrl_fcn_dw; // @[issue-slot.scala:69:7]
wire [2:0] io_in_uop_bits_ctrl_csr_cmd_0 = io_in_uop_bits_ctrl_csr_cmd; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_ctrl_is_load_0 = io_in_uop_bits_ctrl_is_load; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_ctrl_is_sta_0 = io_in_uop_bits_ctrl_is_sta; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_ctrl_is_std_0 = io_in_uop_bits_ctrl_is_std; // @[issue-slot.scala:69:7]
wire [1:0] io_in_uop_bits_iw_state_0 = io_in_uop_bits_iw_state; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_is_br_0 = io_in_uop_bits_is_br; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_is_jalr_0 = io_in_uop_bits_is_jalr; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_is_jal_0 = io_in_uop_bits_is_jal; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_is_sfb_0 = io_in_uop_bits_is_sfb; // @[issue-slot.scala:69:7]
wire [7:0] io_in_uop_bits_br_mask_0 = io_in_uop_bits_br_mask; // @[issue-slot.scala:69:7]
wire [2:0] io_in_uop_bits_br_tag_0 = io_in_uop_bits_br_tag; // @[issue-slot.scala:69:7]
wire [3:0] io_in_uop_bits_ftq_idx_0 = io_in_uop_bits_ftq_idx; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_edge_inst_0 = io_in_uop_bits_edge_inst; // @[issue-slot.scala:69:7]
wire [5:0] io_in_uop_bits_pc_lob_0 = io_in_uop_bits_pc_lob; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_taken_0 = io_in_uop_bits_taken; // @[issue-slot.scala:69:7]
wire [19:0] io_in_uop_bits_imm_packed_0 = io_in_uop_bits_imm_packed; // @[issue-slot.scala:69:7]
wire [11:0] io_in_uop_bits_csr_addr_0 = io_in_uop_bits_csr_addr; // @[issue-slot.scala:69:7]
wire [4:0] io_in_uop_bits_rob_idx_0 = io_in_uop_bits_rob_idx; // @[issue-slot.scala:69:7]
wire [2:0] io_in_uop_bits_ldq_idx_0 = io_in_uop_bits_ldq_idx; // @[issue-slot.scala:69:7]
wire [2:0] io_in_uop_bits_stq_idx_0 = io_in_uop_bits_stq_idx; // @[issue-slot.scala:69:7]
wire [1:0] io_in_uop_bits_rxq_idx_0 = io_in_uop_bits_rxq_idx; // @[issue-slot.scala:69:7]
wire [5:0] io_in_uop_bits_pdst_0 = io_in_uop_bits_pdst; // @[issue-slot.scala:69:7]
wire [5:0] io_in_uop_bits_prs1_0 = io_in_uop_bits_prs1; // @[issue-slot.scala:69:7]
wire [5:0] io_in_uop_bits_prs2_0 = io_in_uop_bits_prs2; // @[issue-slot.scala:69:7]
wire [5:0] io_in_uop_bits_prs3_0 = io_in_uop_bits_prs3; // @[issue-slot.scala:69:7]
wire [3:0] io_in_uop_bits_ppred_0 = io_in_uop_bits_ppred; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_prs1_busy_0 = io_in_uop_bits_prs1_busy; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_prs2_busy_0 = io_in_uop_bits_prs2_busy; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_prs3_busy_0 = io_in_uop_bits_prs3_busy; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_ppred_busy_0 = io_in_uop_bits_ppred_busy; // @[issue-slot.scala:69:7]
wire [5:0] io_in_uop_bits_stale_pdst_0 = io_in_uop_bits_stale_pdst; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_exception_0 = io_in_uop_bits_exception; // @[issue-slot.scala:69:7]
wire [63:0] io_in_uop_bits_exc_cause_0 = io_in_uop_bits_exc_cause; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_bypassable_0 = io_in_uop_bits_bypassable; // @[issue-slot.scala:69:7]
wire [4:0] io_in_uop_bits_mem_cmd_0 = io_in_uop_bits_mem_cmd; // @[issue-slot.scala:69:7]
wire [1:0] io_in_uop_bits_mem_size_0 = io_in_uop_bits_mem_size; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_mem_signed_0 = io_in_uop_bits_mem_signed; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_is_fence_0 = io_in_uop_bits_is_fence; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_is_fencei_0 = io_in_uop_bits_is_fencei; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_is_amo_0 = io_in_uop_bits_is_amo; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_uses_ldq_0 = io_in_uop_bits_uses_ldq; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_uses_stq_0 = io_in_uop_bits_uses_stq; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_is_sys_pc2epc_0 = io_in_uop_bits_is_sys_pc2epc; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_is_unique_0 = io_in_uop_bits_is_unique; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_flush_on_commit_0 = io_in_uop_bits_flush_on_commit; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_ldst_is_rs1_0 = io_in_uop_bits_ldst_is_rs1; // @[issue-slot.scala:69:7]
wire [5:0] io_in_uop_bits_ldst_0 = io_in_uop_bits_ldst; // @[issue-slot.scala:69:7]
wire [5:0] io_in_uop_bits_lrs1_0 = io_in_uop_bits_lrs1; // @[issue-slot.scala:69:7]
wire [5:0] io_in_uop_bits_lrs2_0 = io_in_uop_bits_lrs2; // @[issue-slot.scala:69:7]
wire [5:0] io_in_uop_bits_lrs3_0 = io_in_uop_bits_lrs3; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_ldst_val_0 = io_in_uop_bits_ldst_val; // @[issue-slot.scala:69:7]
wire [1:0] io_in_uop_bits_dst_rtype_0 = io_in_uop_bits_dst_rtype; // @[issue-slot.scala:69:7]
wire [1:0] io_in_uop_bits_lrs1_rtype_0 = io_in_uop_bits_lrs1_rtype; // @[issue-slot.scala:69:7]
wire [1:0] io_in_uop_bits_lrs2_rtype_0 = io_in_uop_bits_lrs2_rtype; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_frs3_en_0 = io_in_uop_bits_frs3_en; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_fp_val_0 = io_in_uop_bits_fp_val; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_fp_single_0 = io_in_uop_bits_fp_single; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_xcpt_pf_if_0 = io_in_uop_bits_xcpt_pf_if; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_xcpt_ae_if_0 = io_in_uop_bits_xcpt_ae_if; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_xcpt_ma_if_0 = io_in_uop_bits_xcpt_ma_if; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_bp_debug_if_0 = io_in_uop_bits_bp_debug_if; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_bp_xcpt_if_0 = io_in_uop_bits_bp_xcpt_if; // @[issue-slot.scala:69:7]
wire [1:0] io_in_uop_bits_debug_fsrc_0 = io_in_uop_bits_debug_fsrc; // @[issue-slot.scala:69:7]
wire [1:0] io_in_uop_bits_debug_tsrc_0 = io_in_uop_bits_debug_tsrc; // @[issue-slot.scala:69:7]
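  // Constant tie-offs: this slot configuration has no speculative-load wakeup,
  // predicate wakeup, or poisoned-operand support, so the related signals are
  // hard-wired to zero; the slot_uop_uop_* / slot_uop_cs_* defaults below are
  // the NOP-like reset values of the stored micro-op.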
wire io_ldspec_miss = 1'h0; // @[issue-slot.scala:69:7]
wire io_wakeup_ports_0_bits_poisoned = 1'h0; // @[issue-slot.scala:69:7]
wire io_wakeup_ports_1_bits_poisoned = 1'h0; // @[issue-slot.scala:69:7]
wire io_pred_wakeup_port_valid = 1'h0; // @[issue-slot.scala:69:7]
wire io_spec_ld_wakeup_0_valid = 1'h0; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_iw_p1_poisoned = 1'h0; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_iw_p2_poisoned = 1'h0; // @[issue-slot.scala:69:7]
wire io_out_uop_iw_p1_poisoned = 1'h0; // @[issue-slot.scala:69:7]
wire io_out_uop_iw_p2_poisoned = 1'h0; // @[issue-slot.scala:69:7]
wire io_uop_iw_p1_poisoned = 1'h0; // @[issue-slot.scala:69:7]
wire io_uop_iw_p2_poisoned = 1'h0; // @[issue-slot.scala:69:7]
wire next_p1_poisoned = 1'h0; // @[issue-slot.scala:99:29]
wire next_p2_poisoned = 1'h0; // @[issue-slot.scala:100:29]
wire slot_uop_uop_is_rvc = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_ctrl_fcn_dw = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_ctrl_is_load = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_ctrl_is_sta = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_ctrl_is_std = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_iw_p1_poisoned = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_iw_p2_poisoned = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_is_br = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_is_jalr = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_is_jal = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_is_sfb = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_edge_inst = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_taken = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_prs1_busy = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_prs2_busy = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_prs3_busy = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_ppred_busy = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_exception = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_bypassable = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_mem_signed = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_is_fence = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_is_fencei = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_is_amo = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_uses_ldq = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_uses_stq = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_is_sys_pc2epc = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_is_unique = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_flush_on_commit = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_ldst_is_rs1 = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_ldst_val = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_frs3_en = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_fp_val = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_fp_single = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_xcpt_pf_if = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_xcpt_ae_if = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_xcpt_ma_if = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_bp_debug_if = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_bp_xcpt_if = 1'h0; // @[consts.scala:269:19]
wire slot_uop_cs_fcn_dw = 1'h0; // @[consts.scala:279:18]
wire slot_uop_cs_is_load = 1'h0; // @[consts.scala:279:18]
wire slot_uop_cs_is_sta = 1'h0; // @[consts.scala:279:18]
wire slot_uop_cs_is_std = 1'h0; // @[consts.scala:279:18]
wire _squash_grant_T = 1'h0; // @[issue-slot.scala:261:53]
wire squash_grant = 1'h0; // @[issue-slot.scala:261:37]
wire [3:0] io_pred_wakeup_port_bits = 4'h0; // @[issue-slot.scala:69:7]
wire [3:0] slot_uop_uop_ctrl_br_type = 4'h0; // @[consts.scala:269:19]
wire [3:0] slot_uop_uop_ftq_idx = 4'h0; // @[consts.scala:269:19]
wire [3:0] slot_uop_uop_ppred = 4'h0; // @[consts.scala:269:19]
wire [3:0] slot_uop_cs_br_type = 4'h0; // @[consts.scala:279:18]
wire [5:0] io_spec_ld_wakeup_0_bits = 6'h0; // @[issue-slot.scala:69:7]
wire [5:0] slot_uop_uop_pc_lob = 6'h0; // @[consts.scala:269:19]
wire [5:0] slot_uop_uop_pdst = 6'h0; // @[consts.scala:269:19]
wire [5:0] slot_uop_uop_prs1 = 6'h0; // @[consts.scala:269:19]
wire [5:0] slot_uop_uop_prs2 = 6'h0; // @[consts.scala:269:19]
wire [5:0] slot_uop_uop_prs3 = 6'h0; // @[consts.scala:269:19]
wire [5:0] slot_uop_uop_stale_pdst = 6'h0; // @[consts.scala:269:19]
wire [5:0] slot_uop_uop_ldst = 6'h0; // @[consts.scala:269:19]
wire [5:0] slot_uop_uop_lrs1 = 6'h0; // @[consts.scala:269:19]
wire [5:0] slot_uop_uop_lrs2 = 6'h0; // @[consts.scala:269:19]
wire [5:0] slot_uop_uop_lrs3 = 6'h0; // @[consts.scala:269:19]
wire _io_will_be_valid_T_1 = 1'h1; // @[issue-slot.scala:262:51]
wire [1:0] slot_uop_uop_ctrl_op1_sel = 2'h0; // @[consts.scala:269:19]
wire [1:0] slot_uop_uop_iw_state = 2'h0; // @[consts.scala:269:19]
wire [1:0] slot_uop_uop_rxq_idx = 2'h0; // @[consts.scala:269:19]
wire [1:0] slot_uop_uop_mem_size = 2'h0; // @[consts.scala:269:19]
wire [1:0] slot_uop_uop_lrs1_rtype = 2'h0; // @[consts.scala:269:19]
wire [1:0] slot_uop_uop_lrs2_rtype = 2'h0; // @[consts.scala:269:19]
wire [1:0] slot_uop_uop_debug_fsrc = 2'h0; // @[consts.scala:269:19]
wire [1:0] slot_uop_uop_debug_tsrc = 2'h0; // @[consts.scala:269:19]
wire [1:0] slot_uop_cs_op1_sel = 2'h0; // @[consts.scala:279:18]
wire [2:0] slot_uop_uop_iq_type = 3'h0; // @[consts.scala:269:19]
wire [2:0] slot_uop_uop_ctrl_op2_sel = 3'h0; // @[consts.scala:269:19]
wire [2:0] slot_uop_uop_ctrl_imm_sel = 3'h0; // @[consts.scala:269:19]
wire [2:0] slot_uop_uop_ctrl_csr_cmd = 3'h0; // @[consts.scala:269:19]
wire [2:0] slot_uop_uop_br_tag = 3'h0; // @[consts.scala:269:19]
wire [2:0] slot_uop_uop_ldq_idx = 3'h0; // @[consts.scala:269:19]
wire [2:0] slot_uop_uop_stq_idx = 3'h0; // @[consts.scala:269:19]
wire [2:0] slot_uop_cs_op2_sel = 3'h0; // @[consts.scala:279:18]
wire [2:0] slot_uop_cs_imm_sel = 3'h0; // @[consts.scala:279:18]
wire [2:0] slot_uop_cs_csr_cmd = 3'h0; // @[consts.scala:279:18]
wire [4:0] slot_uop_uop_ctrl_op_fcn = 5'h0; // @[consts.scala:269:19]
wire [4:0] slot_uop_uop_rob_idx = 5'h0; // @[consts.scala:269:19]
wire [4:0] slot_uop_uop_mem_cmd = 5'h0; // @[consts.scala:269:19]
wire [4:0] slot_uop_cs_op_fcn = 5'h0; // @[consts.scala:279:18]
wire [1:0] slot_uop_uop_dst_rtype = 2'h2; // @[consts.scala:269:19]
wire [63:0] slot_uop_uop_exc_cause = 64'h0; // @[consts.scala:269:19]
wire [11:0] slot_uop_uop_csr_addr = 12'h0; // @[consts.scala:269:19]
wire [19:0] slot_uop_uop_imm_packed = 20'h0; // @[consts.scala:269:19]
wire [7:0] slot_uop_uop_br_mask = 8'h0; // @[consts.scala:269:19]
wire [9:0] slot_uop_uop_fu_code = 10'h0; // @[consts.scala:269:19]
wire [39:0] slot_uop_uop_debug_pc = 40'h0; // @[consts.scala:269:19]
wire [31:0] slot_uop_uop_inst = 32'h0; // @[consts.scala:269:19]
wire [31:0] slot_uop_uop_debug_inst = 32'h0; // @[consts.scala:269:19]
wire [6:0] slot_uop_uop_uopc = 7'h0; // @[consts.scala:269:19]
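  // Forward declarations for combinational nets computed later (next state,
  // next uopc/rtype, busy-bit clears) and for the wires driving the
  // io_out_uop / io_uop / io_debug output ports.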
wire _io_valid_T; // @[issue-slot.scala:79:24]
wire _io_will_be_valid_T_4; // @[issue-slot.scala:262:32]
wire _io_request_hp_T; // @[issue-slot.scala:243:31]
wire [6:0] next_uopc; // @[issue-slot.scala:82:29]
wire [1:0] next_state; // @[issue-slot.scala:81:29]
wire [7:0] next_br_mask; // @[util.scala:85:25]
wire _io_out_uop_prs1_busy_T; // @[issue-slot.scala:270:28]
wire _io_out_uop_prs2_busy_T; // @[issue-slot.scala:271:28]
wire _io_out_uop_prs3_busy_T; // @[issue-slot.scala:272:28]
wire _io_out_uop_ppred_busy_T; // @[issue-slot.scala:273:28]
wire [1:0] next_lrs1_rtype; // @[issue-slot.scala:83:29]
wire [1:0] next_lrs2_rtype; // @[issue-slot.scala:84:29]
wire [3:0] io_out_uop_ctrl_br_type_0; // @[issue-slot.scala:69:7]
wire [1:0] io_out_uop_ctrl_op1_sel_0; // @[issue-slot.scala:69:7]
wire [2:0] io_out_uop_ctrl_op2_sel_0; // @[issue-slot.scala:69:7]
wire [2:0] io_out_uop_ctrl_imm_sel_0; // @[issue-slot.scala:69:7]
wire [4:0] io_out_uop_ctrl_op_fcn_0; // @[issue-slot.scala:69:7]
wire io_out_uop_ctrl_fcn_dw_0; // @[issue-slot.scala:69:7]
wire [2:0] io_out_uop_ctrl_csr_cmd_0; // @[issue-slot.scala:69:7]
wire io_out_uop_ctrl_is_load_0; // @[issue-slot.scala:69:7]
wire io_out_uop_ctrl_is_sta_0; // @[issue-slot.scala:69:7]
wire io_out_uop_ctrl_is_std_0; // @[issue-slot.scala:69:7]
wire [6:0] io_out_uop_uopc_0; // @[issue-slot.scala:69:7]
wire [31:0] io_out_uop_inst_0; // @[issue-slot.scala:69:7]
wire [31:0] io_out_uop_debug_inst_0; // @[issue-slot.scala:69:7]
wire io_out_uop_is_rvc_0; // @[issue-slot.scala:69:7]
wire [39:0] io_out_uop_debug_pc_0; // @[issue-slot.scala:69:7]
wire [2:0] io_out_uop_iq_type_0; // @[issue-slot.scala:69:7]
wire [9:0] io_out_uop_fu_code_0; // @[issue-slot.scala:69:7]
wire [1:0] io_out_uop_iw_state_0; // @[issue-slot.scala:69:7]
wire io_out_uop_is_br_0; // @[issue-slot.scala:69:7]
wire io_out_uop_is_jalr_0; // @[issue-slot.scala:69:7]
wire io_out_uop_is_jal_0; // @[issue-slot.scala:69:7]
wire io_out_uop_is_sfb_0; // @[issue-slot.scala:69:7]
wire [7:0] io_out_uop_br_mask_0; // @[issue-slot.scala:69:7]
wire [2:0] io_out_uop_br_tag_0; // @[issue-slot.scala:69:7]
wire [3:0] io_out_uop_ftq_idx_0; // @[issue-slot.scala:69:7]
wire io_out_uop_edge_inst_0; // @[issue-slot.scala:69:7]
wire [5:0] io_out_uop_pc_lob_0; // @[issue-slot.scala:69:7]
wire io_out_uop_taken_0; // @[issue-slot.scala:69:7]
wire [19:0] io_out_uop_imm_packed_0; // @[issue-slot.scala:69:7]
wire [11:0] io_out_uop_csr_addr_0; // @[issue-slot.scala:69:7]
wire [4:0] io_out_uop_rob_idx_0; // @[issue-slot.scala:69:7]
wire [2:0] io_out_uop_ldq_idx_0; // @[issue-slot.scala:69:7]
wire [2:0] io_out_uop_stq_idx_0; // @[issue-slot.scala:69:7]
wire [1:0] io_out_uop_rxq_idx_0; // @[issue-slot.scala:69:7]
wire [5:0] io_out_uop_pdst_0; // @[issue-slot.scala:69:7]
wire [5:0] io_out_uop_prs1_0; // @[issue-slot.scala:69:7]
wire [5:0] io_out_uop_prs2_0; // @[issue-slot.scala:69:7]
wire [5:0] io_out_uop_prs3_0; // @[issue-slot.scala:69:7]
wire [3:0] io_out_uop_ppred_0; // @[issue-slot.scala:69:7]
wire io_out_uop_prs1_busy_0; // @[issue-slot.scala:69:7]
wire io_out_uop_prs2_busy_0; // @[issue-slot.scala:69:7]
wire io_out_uop_prs3_busy_0; // @[issue-slot.scala:69:7]
wire io_out_uop_ppred_busy_0; // @[issue-slot.scala:69:7]
wire [5:0] io_out_uop_stale_pdst_0; // @[issue-slot.scala:69:7]
wire io_out_uop_exception_0; // @[issue-slot.scala:69:7]
wire [63:0] io_out_uop_exc_cause_0; // @[issue-slot.scala:69:7]
wire io_out_uop_bypassable_0; // @[issue-slot.scala:69:7]
wire [4:0] io_out_uop_mem_cmd_0; // @[issue-slot.scala:69:7]
wire [1:0] io_out_uop_mem_size_0; // @[issue-slot.scala:69:7]
wire io_out_uop_mem_signed_0; // @[issue-slot.scala:69:7]
wire io_out_uop_is_fence_0; // @[issue-slot.scala:69:7]
wire io_out_uop_is_fencei_0; // @[issue-slot.scala:69:7]
wire io_out_uop_is_amo_0; // @[issue-slot.scala:69:7]
wire io_out_uop_uses_ldq_0; // @[issue-slot.scala:69:7]
wire io_out_uop_uses_stq_0; // @[issue-slot.scala:69:7]
wire io_out_uop_is_sys_pc2epc_0; // @[issue-slot.scala:69:7]
wire io_out_uop_is_unique_0; // @[issue-slot.scala:69:7]
wire io_out_uop_flush_on_commit_0; // @[issue-slot.scala:69:7]
wire io_out_uop_ldst_is_rs1_0; // @[issue-slot.scala:69:7]
wire [5:0] io_out_uop_ldst_0; // @[issue-slot.scala:69:7]
wire [5:0] io_out_uop_lrs1_0; // @[issue-slot.scala:69:7]
wire [5:0] io_out_uop_lrs2_0; // @[issue-slot.scala:69:7]
wire [5:0] io_out_uop_lrs3_0; // @[issue-slot.scala:69:7]
wire io_out_uop_ldst_val_0; // @[issue-slot.scala:69:7]
wire [1:0] io_out_uop_dst_rtype_0; // @[issue-slot.scala:69:7]
wire [1:0] io_out_uop_lrs1_rtype_0; // @[issue-slot.scala:69:7]
wire [1:0] io_out_uop_lrs2_rtype_0; // @[issue-slot.scala:69:7]
wire io_out_uop_frs3_en_0; // @[issue-slot.scala:69:7]
wire io_out_uop_fp_val_0; // @[issue-slot.scala:69:7]
wire io_out_uop_fp_single_0; // @[issue-slot.scala:69:7]
wire io_out_uop_xcpt_pf_if_0; // @[issue-slot.scala:69:7]
wire io_out_uop_xcpt_ae_if_0; // @[issue-slot.scala:69:7]
wire io_out_uop_xcpt_ma_if_0; // @[issue-slot.scala:69:7]
wire io_out_uop_bp_debug_if_0; // @[issue-slot.scala:69:7]
wire io_out_uop_bp_xcpt_if_0; // @[issue-slot.scala:69:7]
wire [1:0] io_out_uop_debug_fsrc_0; // @[issue-slot.scala:69:7]
wire [1:0] io_out_uop_debug_tsrc_0; // @[issue-slot.scala:69:7]
wire [3:0] io_uop_ctrl_br_type_0; // @[issue-slot.scala:69:7]
wire [1:0] io_uop_ctrl_op1_sel_0; // @[issue-slot.scala:69:7]
wire [2:0] io_uop_ctrl_op2_sel_0; // @[issue-slot.scala:69:7]
wire [2:0] io_uop_ctrl_imm_sel_0; // @[issue-slot.scala:69:7]
wire [4:0] io_uop_ctrl_op_fcn_0; // @[issue-slot.scala:69:7]
wire io_uop_ctrl_fcn_dw_0; // @[issue-slot.scala:69:7]
wire [2:0] io_uop_ctrl_csr_cmd_0; // @[issue-slot.scala:69:7]
wire io_uop_ctrl_is_load_0; // @[issue-slot.scala:69:7]
wire io_uop_ctrl_is_sta_0; // @[issue-slot.scala:69:7]
wire io_uop_ctrl_is_std_0; // @[issue-slot.scala:69:7]
wire [6:0] io_uop_uopc_0; // @[issue-slot.scala:69:7]
wire [31:0] io_uop_inst_0; // @[issue-slot.scala:69:7]
wire [31:0] io_uop_debug_inst_0; // @[issue-slot.scala:69:7]
wire io_uop_is_rvc_0; // @[issue-slot.scala:69:7]
wire [39:0] io_uop_debug_pc_0; // @[issue-slot.scala:69:7]
wire [2:0] io_uop_iq_type_0; // @[issue-slot.scala:69:7]
wire [9:0] io_uop_fu_code_0; // @[issue-slot.scala:69:7]
wire [1:0] io_uop_iw_state_0; // @[issue-slot.scala:69:7]
wire io_uop_is_br_0; // @[issue-slot.scala:69:7]
wire io_uop_is_jalr_0; // @[issue-slot.scala:69:7]
wire io_uop_is_jal_0; // @[issue-slot.scala:69:7]
wire io_uop_is_sfb_0; // @[issue-slot.scala:69:7]
wire [7:0] io_uop_br_mask_0; // @[issue-slot.scala:69:7]
wire [2:0] io_uop_br_tag_0; // @[issue-slot.scala:69:7]
wire [3:0] io_uop_ftq_idx_0; // @[issue-slot.scala:69:7]
wire io_uop_edge_inst_0; // @[issue-slot.scala:69:7]
wire [5:0] io_uop_pc_lob_0; // @[issue-slot.scala:69:7]
wire io_uop_taken_0; // @[issue-slot.scala:69:7]
wire [19:0] io_uop_imm_packed_0; // @[issue-slot.scala:69:7]
wire [11:0] io_uop_csr_addr_0; // @[issue-slot.scala:69:7]
wire [4:0] io_uop_rob_idx_0; // @[issue-slot.scala:69:7]
wire [2:0] io_uop_ldq_idx_0; // @[issue-slot.scala:69:7]
wire [2:0] io_uop_stq_idx_0; // @[issue-slot.scala:69:7]
wire [1:0] io_uop_rxq_idx_0; // @[issue-slot.scala:69:7]
wire [5:0] io_uop_pdst_0; // @[issue-slot.scala:69:7]
wire [5:0] io_uop_prs1_0; // @[issue-slot.scala:69:7]
wire [5:0] io_uop_prs2_0; // @[issue-slot.scala:69:7]
wire [5:0] io_uop_prs3_0; // @[issue-slot.scala:69:7]
wire [3:0] io_uop_ppred_0; // @[issue-slot.scala:69:7]
wire io_uop_prs1_busy_0; // @[issue-slot.scala:69:7]
wire io_uop_prs2_busy_0; // @[issue-slot.scala:69:7]
wire io_uop_prs3_busy_0; // @[issue-slot.scala:69:7]
wire io_uop_ppred_busy_0; // @[issue-slot.scala:69:7]
wire [5:0] io_uop_stale_pdst_0; // @[issue-slot.scala:69:7]
wire io_uop_exception_0; // @[issue-slot.scala:69:7]
wire [63:0] io_uop_exc_cause_0; // @[issue-slot.scala:69:7]
wire io_uop_bypassable_0; // @[issue-slot.scala:69:7]
wire [4:0] io_uop_mem_cmd_0; // @[issue-slot.scala:69:7]
wire [1:0] io_uop_mem_size_0; // @[issue-slot.scala:69:7]
wire io_uop_mem_signed_0; // @[issue-slot.scala:69:7]
wire io_uop_is_fence_0; // @[issue-slot.scala:69:7]
wire io_uop_is_fencei_0; // @[issue-slot.scala:69:7]
wire io_uop_is_amo_0; // @[issue-slot.scala:69:7]
wire io_uop_uses_ldq_0; // @[issue-slot.scala:69:7]
wire io_uop_uses_stq_0; // @[issue-slot.scala:69:7]
wire io_uop_is_sys_pc2epc_0; // @[issue-slot.scala:69:7]
wire io_uop_is_unique_0; // @[issue-slot.scala:69:7]
wire io_uop_flush_on_commit_0; // @[issue-slot.scala:69:7]
wire io_uop_ldst_is_rs1_0; // @[issue-slot.scala:69:7]
wire [5:0] io_uop_ldst_0; // @[issue-slot.scala:69:7]
wire [5:0] io_uop_lrs1_0; // @[issue-slot.scala:69:7]
wire [5:0] io_uop_lrs2_0; // @[issue-slot.scala:69:7]
wire [5:0] io_uop_lrs3_0; // @[issue-slot.scala:69:7]
wire io_uop_ldst_val_0; // @[issue-slot.scala:69:7]
wire [1:0] io_uop_dst_rtype_0; // @[issue-slot.scala:69:7]
wire [1:0] io_uop_lrs1_rtype_0; // @[issue-slot.scala:69:7]
wire [1:0] io_uop_lrs2_rtype_0; // @[issue-slot.scala:69:7]
wire io_uop_frs3_en_0; // @[issue-slot.scala:69:7]
wire io_uop_fp_val_0; // @[issue-slot.scala:69:7]
wire io_uop_fp_single_0; // @[issue-slot.scala:69:7]
wire io_uop_xcpt_pf_if_0; // @[issue-slot.scala:69:7]
wire io_uop_xcpt_ae_if_0; // @[issue-slot.scala:69:7]
wire io_uop_xcpt_ma_if_0; // @[issue-slot.scala:69:7]
wire io_uop_bp_debug_if_0; // @[issue-slot.scala:69:7]
wire io_uop_bp_xcpt_if_0; // @[issue-slot.scala:69:7]
wire [1:0] io_uop_debug_fsrc_0; // @[issue-slot.scala:69:7]
wire [1:0] io_uop_debug_tsrc_0; // @[issue-slot.scala:69:7]
wire io_debug_p1_0; // @[issue-slot.scala:69:7]
wire io_debug_p2_0; // @[issue-slot.scala:69:7]
wire io_debug_p3_0; // @[issue-slot.scala:69:7]
wire io_debug_ppred_0; // @[issue-slot.scala:69:7]
wire [1:0] io_debug_state_0; // @[issue-slot.scala:69:7]
wire io_valid_0; // @[issue-slot.scala:69:7]
wire io_will_be_valid_0; // @[issue-slot.scala:69:7]
wire io_request_0; // @[issue-slot.scala:69:7]
wire io_request_hp_0; // @[issue-slot.scala:69:7]
assign io_out_uop_iw_state_0 = next_state; // @[issue-slot.scala:69:7, :81:29]
assign io_out_uop_uopc_0 = next_uopc; // @[issue-slot.scala:69:7, :82:29]
assign io_out_uop_lrs1_rtype_0 = next_lrs1_rtype; // @[issue-slot.scala:69:7, :83:29]
assign io_out_uop_lrs2_rtype_0 = next_lrs2_rtype; // @[issue-slot.scala:69:7, :84:29]
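  // Slot state register and operand-ready bits: p1/p2/p3 track source-operand
  // readiness and ppred tracks the predicate operand.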
reg [1:0] state; // @[issue-slot.scala:86:22]
assign io_debug_state_0 = state; // @[issue-slot.scala:69:7, :86:22]
reg p1; // @[issue-slot.scala:87:22]
assign io_debug_p1_0 = p1; // @[issue-slot.scala:69:7, :87:22]
wire next_p1 = p1; // @[issue-slot.scala:87:22, :163:25]
reg p2; // @[issue-slot.scala:88:22]
assign io_debug_p2_0 = p2; // @[issue-slot.scala:69:7, :88:22]
wire next_p2 = p2; // @[issue-slot.scala:88:22, :164:25]
reg p3; // @[issue-slot.scala:89:22]
assign io_debug_p3_0 = p3; // @[issue-slot.scala:69:7, :89:22]
wire next_p3 = p3; // @[issue-slot.scala:89:22, :165:25]
reg ppred; // @[issue-slot.scala:90:22]
assign io_debug_ppred_0 = ppred; // @[issue-slot.scala:69:7, :90:22]
wire next_ppred = ppred; // @[issue-slot.scala:90:22, :166:28]
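  // Payload registers holding the micro-op stored in this slot; most fields
  // fan out unchanged to both io_out_uop and io_uop.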
reg [6:0] slot_uop_uopc; // @[issue-slot.scala:102:25]
reg [31:0] slot_uop_inst; // @[issue-slot.scala:102:25]
assign io_out_uop_inst_0 = slot_uop_inst; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_inst_0 = slot_uop_inst; // @[issue-slot.scala:69:7, :102:25]
reg [31:0] slot_uop_debug_inst; // @[issue-slot.scala:102:25]
assign io_out_uop_debug_inst_0 = slot_uop_debug_inst; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_debug_inst_0 = slot_uop_debug_inst; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_is_rvc; // @[issue-slot.scala:102:25]
assign io_out_uop_is_rvc_0 = slot_uop_is_rvc; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_is_rvc_0 = slot_uop_is_rvc; // @[issue-slot.scala:69:7, :102:25]
reg [39:0] slot_uop_debug_pc; // @[issue-slot.scala:102:25]
assign io_out_uop_debug_pc_0 = slot_uop_debug_pc; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_debug_pc_0 = slot_uop_debug_pc; // @[issue-slot.scala:69:7, :102:25]
reg [2:0] slot_uop_iq_type; // @[issue-slot.scala:102:25]
assign io_out_uop_iq_type_0 = slot_uop_iq_type; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_iq_type_0 = slot_uop_iq_type; // @[issue-slot.scala:69:7, :102:25]
reg [9:0] slot_uop_fu_code; // @[issue-slot.scala:102:25]
assign io_out_uop_fu_code_0 = slot_uop_fu_code; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_fu_code_0 = slot_uop_fu_code; // @[issue-slot.scala:69:7, :102:25]
reg [3:0] slot_uop_ctrl_br_type; // @[issue-slot.scala:102:25]
assign io_out_uop_ctrl_br_type_0 = slot_uop_ctrl_br_type; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ctrl_br_type_0 = slot_uop_ctrl_br_type; // @[issue-slot.scala:69:7, :102:25]
reg [1:0] slot_uop_ctrl_op1_sel; // @[issue-slot.scala:102:25]
assign io_out_uop_ctrl_op1_sel_0 = slot_uop_ctrl_op1_sel; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ctrl_op1_sel_0 = slot_uop_ctrl_op1_sel; // @[issue-slot.scala:69:7, :102:25]
reg [2:0] slot_uop_ctrl_op2_sel; // @[issue-slot.scala:102:25]
assign io_out_uop_ctrl_op2_sel_0 = slot_uop_ctrl_op2_sel; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ctrl_op2_sel_0 = slot_uop_ctrl_op2_sel; // @[issue-slot.scala:69:7, :102:25]
reg [2:0] slot_uop_ctrl_imm_sel; // @[issue-slot.scala:102:25]
assign io_out_uop_ctrl_imm_sel_0 = slot_uop_ctrl_imm_sel; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ctrl_imm_sel_0 = slot_uop_ctrl_imm_sel; // @[issue-slot.scala:69:7, :102:25]
reg [4:0] slot_uop_ctrl_op_fcn; // @[issue-slot.scala:102:25]
assign io_out_uop_ctrl_op_fcn_0 = slot_uop_ctrl_op_fcn; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ctrl_op_fcn_0 = slot_uop_ctrl_op_fcn; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_ctrl_fcn_dw; // @[issue-slot.scala:102:25]
assign io_out_uop_ctrl_fcn_dw_0 = slot_uop_ctrl_fcn_dw; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ctrl_fcn_dw_0 = slot_uop_ctrl_fcn_dw; // @[issue-slot.scala:69:7, :102:25]
reg [2:0] slot_uop_ctrl_csr_cmd; // @[issue-slot.scala:102:25]
assign io_out_uop_ctrl_csr_cmd_0 = slot_uop_ctrl_csr_cmd; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ctrl_csr_cmd_0 = slot_uop_ctrl_csr_cmd; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_ctrl_is_load; // @[issue-slot.scala:102:25]
assign io_out_uop_ctrl_is_load_0 = slot_uop_ctrl_is_load; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ctrl_is_load_0 = slot_uop_ctrl_is_load; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_ctrl_is_sta; // @[issue-slot.scala:102:25]
assign io_out_uop_ctrl_is_sta_0 = slot_uop_ctrl_is_sta; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ctrl_is_sta_0 = slot_uop_ctrl_is_sta; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_ctrl_is_std; // @[issue-slot.scala:102:25]
assign io_out_uop_ctrl_is_std_0 = slot_uop_ctrl_is_std; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ctrl_is_std_0 = slot_uop_ctrl_is_std; // @[issue-slot.scala:69:7, :102:25]
reg [1:0] slot_uop_iw_state; // @[issue-slot.scala:102:25]
assign io_uop_iw_state_0 = slot_uop_iw_state; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_iw_p1_poisoned; // @[issue-slot.scala:102:25]
reg slot_uop_iw_p2_poisoned; // @[issue-slot.scala:102:25]
reg slot_uop_is_br; // @[issue-slot.scala:102:25]
assign io_out_uop_is_br_0 = slot_uop_is_br; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_is_br_0 = slot_uop_is_br; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_is_jalr; // @[issue-slot.scala:102:25]
assign io_out_uop_is_jalr_0 = slot_uop_is_jalr; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_is_jalr_0 = slot_uop_is_jalr; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_is_jal; // @[issue-slot.scala:102:25]
assign io_out_uop_is_jal_0 = slot_uop_is_jal; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_is_jal_0 = slot_uop_is_jal; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_is_sfb; // @[issue-slot.scala:102:25]
assign io_out_uop_is_sfb_0 = slot_uop_is_sfb; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_is_sfb_0 = slot_uop_is_sfb; // @[issue-slot.scala:69:7, :102:25]
reg [7:0] slot_uop_br_mask; // @[issue-slot.scala:102:25]
assign io_uop_br_mask_0 = slot_uop_br_mask; // @[issue-slot.scala:69:7, :102:25]
reg [2:0] slot_uop_br_tag; // @[issue-slot.scala:102:25]
assign io_out_uop_br_tag_0 = slot_uop_br_tag; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_br_tag_0 = slot_uop_br_tag; // @[issue-slot.scala:69:7, :102:25]
reg [3:0] slot_uop_ftq_idx; // @[issue-slot.scala:102:25]
assign io_out_uop_ftq_idx_0 = slot_uop_ftq_idx; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ftq_idx_0 = slot_uop_ftq_idx; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_edge_inst; // @[issue-slot.scala:102:25]
assign io_out_uop_edge_inst_0 = slot_uop_edge_inst; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_edge_inst_0 = slot_uop_edge_inst; // @[issue-slot.scala:69:7, :102:25]
reg [5:0] slot_uop_pc_lob; // @[issue-slot.scala:102:25]
assign io_out_uop_pc_lob_0 = slot_uop_pc_lob; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_pc_lob_0 = slot_uop_pc_lob; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_taken; // @[issue-slot.scala:102:25]
assign io_out_uop_taken_0 = slot_uop_taken; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_taken_0 = slot_uop_taken; // @[issue-slot.scala:69:7, :102:25]
reg [19:0] slot_uop_imm_packed; // @[issue-slot.scala:102:25]
assign io_out_uop_imm_packed_0 = slot_uop_imm_packed; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_imm_packed_0 = slot_uop_imm_packed; // @[issue-slot.scala:69:7, :102:25]
reg [11:0] slot_uop_csr_addr; // @[issue-slot.scala:102:25]
assign io_out_uop_csr_addr_0 = slot_uop_csr_addr; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_csr_addr_0 = slot_uop_csr_addr; // @[issue-slot.scala:69:7, :102:25]
reg [4:0] slot_uop_rob_idx; // @[issue-slot.scala:102:25]
assign io_out_uop_rob_idx_0 = slot_uop_rob_idx; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_rob_idx_0 = slot_uop_rob_idx; // @[issue-slot.scala:69:7, :102:25]
reg [2:0] slot_uop_ldq_idx; // @[issue-slot.scala:102:25]
assign io_out_uop_ldq_idx_0 = slot_uop_ldq_idx; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ldq_idx_0 = slot_uop_ldq_idx; // @[issue-slot.scala:69:7, :102:25]
reg [2:0] slot_uop_stq_idx; // @[issue-slot.scala:102:25]
assign io_out_uop_stq_idx_0 = slot_uop_stq_idx; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_stq_idx_0 = slot_uop_stq_idx; // @[issue-slot.scala:69:7, :102:25]
reg [1:0] slot_uop_rxq_idx; // @[issue-slot.scala:102:25]
assign io_out_uop_rxq_idx_0 = slot_uop_rxq_idx; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_rxq_idx_0 = slot_uop_rxq_idx; // @[issue-slot.scala:69:7, :102:25]
reg [5:0] slot_uop_pdst; // @[issue-slot.scala:102:25]
assign io_out_uop_pdst_0 = slot_uop_pdst; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_pdst_0 = slot_uop_pdst; // @[issue-slot.scala:69:7, :102:25]
reg [5:0] slot_uop_prs1; // @[issue-slot.scala:102:25]
assign io_out_uop_prs1_0 = slot_uop_prs1; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_prs1_0 = slot_uop_prs1; // @[issue-slot.scala:69:7, :102:25]
reg [5:0] slot_uop_prs2; // @[issue-slot.scala:102:25]
assign io_out_uop_prs2_0 = slot_uop_prs2; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_prs2_0 = slot_uop_prs2; // @[issue-slot.scala:69:7, :102:25]
reg [5:0] slot_uop_prs3; // @[issue-slot.scala:102:25]
assign io_out_uop_prs3_0 = slot_uop_prs3; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_prs3_0 = slot_uop_prs3; // @[issue-slot.scala:69:7, :102:25]
reg [3:0] slot_uop_ppred; // @[issue-slot.scala:102:25]
assign io_out_uop_ppred_0 = slot_uop_ppred; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ppred_0 = slot_uop_ppred; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_prs1_busy; // @[issue-slot.scala:102:25]
assign io_uop_prs1_busy_0 = slot_uop_prs1_busy; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_prs2_busy; // @[issue-slot.scala:102:25]
assign io_uop_prs2_busy_0 = slot_uop_prs2_busy; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_prs3_busy; // @[issue-slot.scala:102:25]
assign io_uop_prs3_busy_0 = slot_uop_prs3_busy; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_ppred_busy; // @[issue-slot.scala:102:25]
assign io_uop_ppred_busy_0 = slot_uop_ppred_busy; // @[issue-slot.scala:69:7, :102:25]
reg [5:0] slot_uop_stale_pdst; // @[issue-slot.scala:102:25]
assign io_out_uop_stale_pdst_0 = slot_uop_stale_pdst; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_stale_pdst_0 = slot_uop_stale_pdst; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_exception; // @[issue-slot.scala:102:25]
assign io_out_uop_exception_0 = slot_uop_exception; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_exception_0 = slot_uop_exception; // @[issue-slot.scala:69:7, :102:25]
reg [63:0] slot_uop_exc_cause; // @[issue-slot.scala:102:25]
assign io_out_uop_exc_cause_0 = slot_uop_exc_cause; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_exc_cause_0 = slot_uop_exc_cause; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_bypassable; // @[issue-slot.scala:102:25]
assign io_out_uop_bypassable_0 = slot_uop_bypassable; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_bypassable_0 = slot_uop_bypassable; // @[issue-slot.scala:69:7, :102:25]
reg [4:0] slot_uop_mem_cmd; // @[issue-slot.scala:102:25]
assign io_out_uop_mem_cmd_0 = slot_uop_mem_cmd; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_mem_cmd_0 = slot_uop_mem_cmd; // @[issue-slot.scala:69:7, :102:25]
reg [1:0] slot_uop_mem_size; // @[issue-slot.scala:102:25]
assign io_out_uop_mem_size_0 = slot_uop_mem_size; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_mem_size_0 = slot_uop_mem_size; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_mem_signed; // @[issue-slot.scala:102:25]
assign io_out_uop_mem_signed_0 = slot_uop_mem_signed; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_mem_signed_0 = slot_uop_mem_signed; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_is_fence; // @[issue-slot.scala:102:25]
assign io_out_uop_is_fence_0 = slot_uop_is_fence; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_is_fence_0 = slot_uop_is_fence; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_is_fencei; // @[issue-slot.scala:102:25]
assign io_out_uop_is_fencei_0 = slot_uop_is_fencei; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_is_fencei_0 = slot_uop_is_fencei; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_is_amo; // @[issue-slot.scala:102:25]
assign io_out_uop_is_amo_0 = slot_uop_is_amo; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_is_amo_0 = slot_uop_is_amo; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_uses_ldq; // @[issue-slot.scala:102:25]
assign io_out_uop_uses_ldq_0 = slot_uop_uses_ldq; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_uses_ldq_0 = slot_uop_uses_ldq; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_uses_stq; // @[issue-slot.scala:102:25]
assign io_out_uop_uses_stq_0 = slot_uop_uses_stq; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_uses_stq_0 = slot_uop_uses_stq; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_is_sys_pc2epc; // @[issue-slot.scala:102:25]
assign io_out_uop_is_sys_pc2epc_0 = slot_uop_is_sys_pc2epc; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_is_sys_pc2epc_0 = slot_uop_is_sys_pc2epc; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_is_unique; // @[issue-slot.scala:102:25]
assign io_out_uop_is_unique_0 = slot_uop_is_unique; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_is_unique_0 = slot_uop_is_unique; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_flush_on_commit; // @[issue-slot.scala:102:25]
assign io_out_uop_flush_on_commit_0 = slot_uop_flush_on_commit; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_flush_on_commit_0 = slot_uop_flush_on_commit; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_ldst_is_rs1; // @[issue-slot.scala:102:25]
assign io_out_uop_ldst_is_rs1_0 = slot_uop_ldst_is_rs1; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ldst_is_rs1_0 = slot_uop_ldst_is_rs1; // @[issue-slot.scala:69:7, :102:25]
reg [5:0] slot_uop_ldst; // @[issue-slot.scala:102:25]
assign io_out_uop_ldst_0 = slot_uop_ldst; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ldst_0 = slot_uop_ldst; // @[issue-slot.scala:69:7, :102:25]
reg [5:0] slot_uop_lrs1; // @[issue-slot.scala:102:25]
assign io_out_uop_lrs1_0 = slot_uop_lrs1; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_lrs1_0 = slot_uop_lrs1; // @[issue-slot.scala:69:7, :102:25]
reg [5:0] slot_uop_lrs2; // @[issue-slot.scala:102:25]
assign io_out_uop_lrs2_0 = slot_uop_lrs2; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_lrs2_0 = slot_uop_lrs2; // @[issue-slot.scala:69:7, :102:25]
reg [5:0] slot_uop_lrs3; // @[issue-slot.scala:102:25]
assign io_out_uop_lrs3_0 = slot_uop_lrs3; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_lrs3_0 = slot_uop_lrs3; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_ldst_val; // @[issue-slot.scala:102:25]
assign io_out_uop_ldst_val_0 = slot_uop_ldst_val; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ldst_val_0 = slot_uop_ldst_val; // @[issue-slot.scala:69:7, :102:25]
reg [1:0] slot_uop_dst_rtype; // @[issue-slot.scala:102:25]
assign io_out_uop_dst_rtype_0 = slot_uop_dst_rtype; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_dst_rtype_0 = slot_uop_dst_rtype; // @[issue-slot.scala:69:7, :102:25]
reg [1:0] slot_uop_lrs1_rtype; // @[issue-slot.scala:102:25]
reg [1:0] slot_uop_lrs2_rtype; // @[issue-slot.scala:102:25]
reg slot_uop_frs3_en; // @[issue-slot.scala:102:25]
assign io_out_uop_frs3_en_0 = slot_uop_frs3_en; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_frs3_en_0 = slot_uop_frs3_en; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_fp_val; // @[issue-slot.scala:102:25]
assign io_out_uop_fp_val_0 = slot_uop_fp_val; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_fp_val_0 = slot_uop_fp_val; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_fp_single; // @[issue-slot.scala:102:25]
assign io_out_uop_fp_single_0 = slot_uop_fp_single; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_fp_single_0 = slot_uop_fp_single; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_xcpt_pf_if; // @[issue-slot.scala:102:25]
assign io_out_uop_xcpt_pf_if_0 = slot_uop_xcpt_pf_if; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_xcpt_pf_if_0 = slot_uop_xcpt_pf_if; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_xcpt_ae_if; // @[issue-slot.scala:102:25]
assign io_out_uop_xcpt_ae_if_0 = slot_uop_xcpt_ae_if; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_xcpt_ae_if_0 = slot_uop_xcpt_ae_if; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_xcpt_ma_if; // @[issue-slot.scala:102:25]
assign io_out_uop_xcpt_ma_if_0 = slot_uop_xcpt_ma_if; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_xcpt_ma_if_0 = slot_uop_xcpt_ma_if; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_bp_debug_if; // @[issue-slot.scala:102:25]
assign io_out_uop_bp_debug_if_0 = slot_uop_bp_debug_if; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_bp_debug_if_0 = slot_uop_bp_debug_if; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_bp_xcpt_if; // @[issue-slot.scala:102:25]
assign io_out_uop_bp_xcpt_if_0 = slot_uop_bp_xcpt_if; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_bp_xcpt_if_0 = slot_uop_bp_xcpt_if; // @[issue-slot.scala:69:7, :102:25]
reg [1:0] slot_uop_debug_fsrc; // @[issue-slot.scala:102:25]
assign io_out_uop_debug_fsrc_0 = slot_uop_debug_fsrc; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_debug_fsrc_0 = slot_uop_debug_fsrc; // @[issue-slot.scala:69:7, :102:25]
reg [1:0] slot_uop_debug_tsrc; // @[issue-slot.scala:102:25]
assign io_out_uop_debug_tsrc_0 = slot_uop_debug_tsrc; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_debug_tsrc_0 = slot_uop_debug_tsrc; // @[issue-slot.scala:69:7, :102:25]
wire [6:0] next_uop_uopc = io_in_uop_valid_0 ? io_in_uop_bits_uopc_0 : slot_uop_uopc; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [31:0] next_uop_inst = io_in_uop_valid_0 ? io_in_uop_bits_inst_0 : slot_uop_inst; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [31:0] next_uop_debug_inst = io_in_uop_valid_0 ? io_in_uop_bits_debug_inst_0 : slot_uop_debug_inst; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_is_rvc = io_in_uop_valid_0 ? io_in_uop_bits_is_rvc_0 : slot_uop_is_rvc; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [39:0] next_uop_debug_pc = io_in_uop_valid_0 ? io_in_uop_bits_debug_pc_0 : slot_uop_debug_pc; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [2:0] next_uop_iq_type = io_in_uop_valid_0 ? io_in_uop_bits_iq_type_0 : slot_uop_iq_type; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [9:0] next_uop_fu_code = io_in_uop_valid_0 ? io_in_uop_bits_fu_code_0 : slot_uop_fu_code; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [3:0] next_uop_ctrl_br_type = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_br_type_0 : slot_uop_ctrl_br_type; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [1:0] next_uop_ctrl_op1_sel = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_op1_sel_0 : slot_uop_ctrl_op1_sel; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [2:0] next_uop_ctrl_op2_sel = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_op2_sel_0 : slot_uop_ctrl_op2_sel; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [2:0] next_uop_ctrl_imm_sel = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_imm_sel_0 : slot_uop_ctrl_imm_sel; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [4:0] next_uop_ctrl_op_fcn = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_op_fcn_0 : slot_uop_ctrl_op_fcn; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_ctrl_fcn_dw = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_fcn_dw_0 : slot_uop_ctrl_fcn_dw; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [2:0] next_uop_ctrl_csr_cmd = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_csr_cmd_0 : slot_uop_ctrl_csr_cmd; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_ctrl_is_load = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_is_load_0 : slot_uop_ctrl_is_load; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_ctrl_is_sta = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_is_sta_0 : slot_uop_ctrl_is_sta; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_ctrl_is_std = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_is_std_0 : slot_uop_ctrl_is_std; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [1:0] next_uop_iw_state = io_in_uop_valid_0 ? io_in_uop_bits_iw_state_0 : slot_uop_iw_state; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_iw_p1_poisoned = ~io_in_uop_valid_0 & slot_uop_iw_p1_poisoned; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_iw_p2_poisoned = ~io_in_uop_valid_0 & slot_uop_iw_p2_poisoned; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_is_br = io_in_uop_valid_0 ? io_in_uop_bits_is_br_0 : slot_uop_is_br; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_is_jalr = io_in_uop_valid_0 ? io_in_uop_bits_is_jalr_0 : slot_uop_is_jalr; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_is_jal = io_in_uop_valid_0 ? io_in_uop_bits_is_jal_0 : slot_uop_is_jal; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_is_sfb = io_in_uop_valid_0 ? io_in_uop_bits_is_sfb_0 : slot_uop_is_sfb; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [7:0] next_uop_br_mask = io_in_uop_valid_0 ? io_in_uop_bits_br_mask_0 : slot_uop_br_mask; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [2:0] next_uop_br_tag = io_in_uop_valid_0 ? io_in_uop_bits_br_tag_0 : slot_uop_br_tag; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [3:0] next_uop_ftq_idx = io_in_uop_valid_0 ? io_in_uop_bits_ftq_idx_0 : slot_uop_ftq_idx; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_edge_inst = io_in_uop_valid_0 ? io_in_uop_bits_edge_inst_0 : slot_uop_edge_inst; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [5:0] next_uop_pc_lob = io_in_uop_valid_0 ? io_in_uop_bits_pc_lob_0 : slot_uop_pc_lob; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_taken = io_in_uop_valid_0 ? io_in_uop_bits_taken_0 : slot_uop_taken; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [19:0] next_uop_imm_packed = io_in_uop_valid_0 ? io_in_uop_bits_imm_packed_0 : slot_uop_imm_packed; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [11:0] next_uop_csr_addr = io_in_uop_valid_0 ? io_in_uop_bits_csr_addr_0 : slot_uop_csr_addr; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [4:0] next_uop_rob_idx = io_in_uop_valid_0 ? io_in_uop_bits_rob_idx_0 : slot_uop_rob_idx; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [2:0] next_uop_ldq_idx = io_in_uop_valid_0 ? io_in_uop_bits_ldq_idx_0 : slot_uop_ldq_idx; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [2:0] next_uop_stq_idx = io_in_uop_valid_0 ? io_in_uop_bits_stq_idx_0 : slot_uop_stq_idx; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [1:0] next_uop_rxq_idx = io_in_uop_valid_0 ? io_in_uop_bits_rxq_idx_0 : slot_uop_rxq_idx; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [5:0] next_uop_pdst = io_in_uop_valid_0 ? io_in_uop_bits_pdst_0 : slot_uop_pdst; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [5:0] next_uop_prs1 = io_in_uop_valid_0 ? io_in_uop_bits_prs1_0 : slot_uop_prs1; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [5:0] next_uop_prs2 = io_in_uop_valid_0 ? io_in_uop_bits_prs2_0 : slot_uop_prs2; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [5:0] next_uop_prs3 = io_in_uop_valid_0 ? io_in_uop_bits_prs3_0 : slot_uop_prs3; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [3:0] next_uop_ppred = io_in_uop_valid_0 ? io_in_uop_bits_ppred_0 : slot_uop_ppred; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_prs1_busy = io_in_uop_valid_0 ? io_in_uop_bits_prs1_busy_0 : slot_uop_prs1_busy; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_prs2_busy = io_in_uop_valid_0 ? io_in_uop_bits_prs2_busy_0 : slot_uop_prs2_busy; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_prs3_busy = io_in_uop_valid_0 ? io_in_uop_bits_prs3_busy_0 : slot_uop_prs3_busy; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_ppred_busy = io_in_uop_valid_0 ? io_in_uop_bits_ppred_busy_0 : slot_uop_ppred_busy; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [5:0] next_uop_stale_pdst = io_in_uop_valid_0 ? io_in_uop_bits_stale_pdst_0 : slot_uop_stale_pdst; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_exception = io_in_uop_valid_0 ? io_in_uop_bits_exception_0 : slot_uop_exception; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [63:0] next_uop_exc_cause = io_in_uop_valid_0 ? io_in_uop_bits_exc_cause_0 : slot_uop_exc_cause; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_bypassable = io_in_uop_valid_0 ? io_in_uop_bits_bypassable_0 : slot_uop_bypassable; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [4:0] next_uop_mem_cmd = io_in_uop_valid_0 ? io_in_uop_bits_mem_cmd_0 : slot_uop_mem_cmd; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [1:0] next_uop_mem_size = io_in_uop_valid_0 ? io_in_uop_bits_mem_size_0 : slot_uop_mem_size; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_mem_signed = io_in_uop_valid_0 ? io_in_uop_bits_mem_signed_0 : slot_uop_mem_signed; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_is_fence = io_in_uop_valid_0 ? io_in_uop_bits_is_fence_0 : slot_uop_is_fence; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_is_fencei = io_in_uop_valid_0 ? io_in_uop_bits_is_fencei_0 : slot_uop_is_fencei; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_is_amo = io_in_uop_valid_0 ? io_in_uop_bits_is_amo_0 : slot_uop_is_amo; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_uses_ldq = io_in_uop_valid_0 ? io_in_uop_bits_uses_ldq_0 : slot_uop_uses_ldq; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_uses_stq = io_in_uop_valid_0 ? io_in_uop_bits_uses_stq_0 : slot_uop_uses_stq; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_is_sys_pc2epc = io_in_uop_valid_0 ? io_in_uop_bits_is_sys_pc2epc_0 : slot_uop_is_sys_pc2epc; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_is_unique = io_in_uop_valid_0 ? io_in_uop_bits_is_unique_0 : slot_uop_is_unique; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_flush_on_commit = io_in_uop_valid_0 ? io_in_uop_bits_flush_on_commit_0 : slot_uop_flush_on_commit; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_ldst_is_rs1 = io_in_uop_valid_0 ? io_in_uop_bits_ldst_is_rs1_0 : slot_uop_ldst_is_rs1; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [5:0] next_uop_ldst = io_in_uop_valid_0 ? io_in_uop_bits_ldst_0 : slot_uop_ldst; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [5:0] next_uop_lrs1 = io_in_uop_valid_0 ? io_in_uop_bits_lrs1_0 : slot_uop_lrs1; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [5:0] next_uop_lrs2 = io_in_uop_valid_0 ? io_in_uop_bits_lrs2_0 : slot_uop_lrs2; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [5:0] next_uop_lrs3 = io_in_uop_valid_0 ? io_in_uop_bits_lrs3_0 : slot_uop_lrs3; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_ldst_val = io_in_uop_valid_0 ? io_in_uop_bits_ldst_val_0 : slot_uop_ldst_val; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [1:0] next_uop_dst_rtype = io_in_uop_valid_0 ? io_in_uop_bits_dst_rtype_0 : slot_uop_dst_rtype; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [1:0] next_uop_lrs1_rtype = io_in_uop_valid_0 ? io_in_uop_bits_lrs1_rtype_0 : slot_uop_lrs1_rtype; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [1:0] next_uop_lrs2_rtype = io_in_uop_valid_0 ? io_in_uop_bits_lrs2_rtype_0 : slot_uop_lrs2_rtype; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_frs3_en = io_in_uop_valid_0 ? io_in_uop_bits_frs3_en_0 : slot_uop_frs3_en; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_fp_val = io_in_uop_valid_0 ? io_in_uop_bits_fp_val_0 : slot_uop_fp_val; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_fp_single = io_in_uop_valid_0 ? io_in_uop_bits_fp_single_0 : slot_uop_fp_single; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_xcpt_pf_if = io_in_uop_valid_0 ? io_in_uop_bits_xcpt_pf_if_0 : slot_uop_xcpt_pf_if; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_xcpt_ae_if = io_in_uop_valid_0 ? io_in_uop_bits_xcpt_ae_if_0 : slot_uop_xcpt_ae_if; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_xcpt_ma_if = io_in_uop_valid_0 ? io_in_uop_bits_xcpt_ma_if_0 : slot_uop_xcpt_ma_if; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_bp_debug_if = io_in_uop_valid_0 ? io_in_uop_bits_bp_debug_if_0 : slot_uop_bp_debug_if; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_bp_xcpt_if = io_in_uop_valid_0 ? io_in_uop_bits_bp_xcpt_if_0 : slot_uop_bp_xcpt_if; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [1:0] next_uop_debug_fsrc = io_in_uop_valid_0 ? io_in_uop_bits_debug_fsrc_0 : slot_uop_debug_fsrc; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [1:0] next_uop_debug_tsrc = io_in_uop_valid_0 ? io_in_uop_bits_debug_tsrc_0 : slot_uop_debug_tsrc; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire _T_11 = state == 2'h2; // @[issue-slot.scala:86:22, :134:25]
wire _T_7 = io_grant_0 & state == 2'h1 | io_grant_0 & _T_11 & p1 & p2 & ppred; // @[issue-slot.scala:69:7, :86:22, :87:22, :88:22, :90:22, :133:{26,36,52}, :134:{15,25,40,46,52}]
wire _T_12 = io_grant_0 & _T_11; // @[issue-slot.scala:69:7, :134:25, :139:25]
wire _GEN = io_kill_0 | _T_7; // @[issue-slot.scala:69:7, :102:25, :131:18, :133:52, :134:63, :139:51]
wire _GEN_0 = _GEN | ~(_T_12 & p1); // @[issue-slot.scala:87:22, :102:25, :131:18, :134:63, :139:{25,51}, :140:62, :142:17, :143:23]
assign next_uopc = _GEN_0 ? slot_uop_uopc : 7'h3; // @[issue-slot.scala:82:29, :102:25, :131:18, :134:63, :139:51]
assign next_lrs1_rtype = _GEN_0 ? slot_uop_lrs1_rtype : 2'h2; // @[issue-slot.scala:83:29, :102:25, :131:18, :134:63, :139:51]
wire _GEN_1 = _GEN | ~_T_12 | p1; // @[issue-slot.scala:87:22, :102:25, :131:18, :134:63, :139:{25,51}, :140:62, :142:17]
assign next_lrs2_rtype = _GEN_1 ? slot_uop_lrs2_rtype : 2'h2; // @[issue-slot.scala:84:29, :102:25, :131:18, :134:63, :139:51, :140:62, :142:17] |
Generate the Verilog code corresponding to the following Chisel files.
File Decode.scala:
// See LICENSE.Berkeley for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util.BitPat
import chisel3.util.experimental.decode._
object DecodeLogic
{
// TODO This should be a method on BitPat
private def hasDontCare(bp: BitPat): Boolean = bp.mask.bitCount != bp.width
// Pads BitPats that are safe to pad (no don't cares), errors otherwise
private def padBP(bp: BitPat, width: Int): BitPat = {
if (bp.width == width) bp
else {
require(!hasDontCare(bp), s"Cannot pad '$bp' to '$width' bits because it has don't cares")
val diff = width - bp.width
require(diff > 0, s"Cannot pad '$bp' to '$width' because it is already '${bp.width}' bits wide!")
BitPat(0.U(diff.W)) ## bp
}
}
def apply(addr: UInt, default: BitPat, mapping: Iterable[(BitPat, BitPat)]): UInt =
chisel3.util.experimental.decode.decoder(QMCMinimizer, addr, TruthTable(mapping, default))
def apply(addr: UInt, default: Seq[BitPat], mappingIn: Iterable[(BitPat, Seq[BitPat])]): Seq[UInt] = {
val nElts = default.size
require(mappingIn.forall(_._2.size == nElts),
s"All Seq[BitPat] must be of the same length, got $nElts vs. ${mappingIn.find(_._2.size != nElts).get}"
)
val elementsGrouped = mappingIn.map(_._2).transpose
val elementWidths = elementsGrouped.zip(default).map { case (elts, default) =>
(default :: elts.toList).map(_.getWidth).max
}
val resultWidth = elementWidths.sum
val elementIndices = elementWidths.scan(resultWidth - 1) { case (l, r) => l - r }
// All BitPats that correspond to a given element in the result must have the same width in the
    // chisel3 decoder. We will zero-pad any BitPats that are too small so long as they don't have
// any don't cares. If there are don't cares, it is an error and the user needs to pad the
// BitPat themselves
val defaultsPadded = default.zip(elementWidths).map { case (bp, w) => padBP(bp, w) }
val mappingInPadded = mappingIn.map { case (in, elts) =>
in -> elts.zip(elementWidths).map { case (bp, w) => padBP(bp, w) }
}
val decoded = apply(addr, defaultsPadded.reduce(_ ## _), mappingInPadded.map { case (in, out) => (in, out.reduce(_ ## _)) })
elementIndices.zip(elementIndices.tail).map { case (msb, lsb) => decoded(msb, lsb + 1) }.toList
}
def apply(addr: UInt, default: Seq[BitPat], mappingIn: List[(UInt, Seq[BitPat])]): Seq[UInt] =
apply(addr, default, mappingIn.map(m => (BitPat(m._1), m._2)).asInstanceOf[Iterable[(BitPat, Seq[BitPat])]])
def apply(addr: UInt, trues: Iterable[UInt], falses: Iterable[UInt]): Bool =
apply(addr, BitPat.dontCare(1), trues.map(BitPat(_) -> BitPat("b1")) ++ falses.map(BitPat(_) -> BitPat("b0"))).asBool
}
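// A minimal usage sketch (illustrative only; the opcode patterns and field names below are
// hypothetical and not taken from any real decode table). It exercises the Seq[BitPat] overload
// above: each table row maps an instruction pattern to one BitPat per output field, and
// DecodeLogic returns the decoded fields in the same order. Relies on the imports at the top
// of this file.
class TinyDecodeExample extends Module {
  val io = IO(new Bundle {
    val inst   = Input(UInt(7.W))   // hypothetical 7-bit opcode
    val isLoad = Output(Bool())
    val aluFn  = Output(UInt(2.W))
  })
  //                     isLoad         aluFn
  val default = List(BitPat("b0"), BitPat("b00"))
  val table = List(
    BitPat("b0000011") -> List(BitPat("b1"), BitPat("b00")), // load-like pattern
    BitPat("b0110011") -> List(BitPat("b0"), BitPat("b01"))  // ALU-like pattern
  )
  val decoded = DecodeLogic(io.inst, default, table)
  io.isLoad := decoded(0).asBool
  io.aluFn  := decoded(1)
}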
File fdiv.scala:
//******************************************************************************
// Copyright (c) 2016 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// FDiv/FSqrt Unit
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v3.exu
import chisel3._
import chisel3.util._
import chisel3.experimental.dataview._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.tile.FPConstants._
import freechips.rocketchip.tile
import boom.v3.common._
import boom.v3.util._
import freechips.rocketchip.tile.HasFPUParameters
import freechips.rocketchip.util.uintToBitPat
/**
* Decoder for FPU divide and square root signals
*/
class UOPCodeFDivDecoder(implicit p: Parameters) extends BoomModule
with HasFPUParameters
{
val io = IO(new Bundle {
val uopc = Input(Bits(UOPC_SZ.W))
val sigs = Output(new tile.FPUCtrlSigs())
})
val N = BitPat("b0")
val Y = BitPat("b1")
val X = BitPat("b?")
val decoder = freechips.rocketchip.rocket.DecodeLogic(io.uopc,
// Note: not all of these signals are used or necessary, but we're
// constrained by the need to fit the rocket.FPU units' ctrl signals.
// swap12 fma
// | swap32 | div
// | | typeTagIn | | sqrt
// ldst | | | typeTagOut | | wflags
// | wen | | | | from_int | | |
// | | ren1 | | | | | to_int | | |
// | | | ren2 | | | | | | fast | | |
// | | | | ren3 | | | | | | | | | |
// | | | | | | | | | | | | | | | |
/* Default */ List(X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X),
Array(
BitPat(uopFDIV_S) -> List(X,X,Y,Y,X, X,X,S,S,X,X,X, X,Y,N,Y),
BitPat(uopFDIV_D) -> List(X,X,Y,Y,X, X,X,D,D,X,X,X, X,Y,N,Y),
BitPat(uopFSQRT_S) -> List(X,X,Y,N,X, X,X,S,S,X,X,X, X,N,Y,Y),
BitPat(uopFSQRT_D) -> List(X,X,Y,N,X, X,X,D,D,X,X,X, X,N,Y,Y)
): Array[(BitPat, List[BitPat])])
val s = io.sigs
val sigs = Seq(s.ldst, s.wen, s.ren1, s.ren2, s.ren3, s.swap12,
s.swap23, s.typeTagIn, s.typeTagOut, s.fromint, s.toint, s.fastpipe, s.fma,
s.div, s.sqrt, s.wflags)
s.vec := false.B
sigs zip decoder map {case(s,d) => s := d}
}
/**
 * fdiv/fsqrt is double-precision. Must upconvert inputs and downconvert outputs
 * as necessary. Must wait until a killed uop finishes before we're ready again.
* fdiv/fsqrt unit uses an unstable FIFO interface, and thus we must spend a
* cycle buffering up an uop to provide slack between the issue queue and the
 * fdiv/fsqrt unit. FDivUnit inherits directly from FunctionalUnit, because
* UnpipelinedFunctionalUnit can only handle 1 inflight uop, whereas FDivUnit
* contains up to 2 inflight uops due to the need to buffer the input as the
* fdiv unit uses an unstable FIFO interface.
 * TODO extend UnpipelinedFunctionalUnit to handle more than one inflight uop.
*
* @param isPipelined is the functional unit pipelined
* @param numStages number of stages for the functional unit
* @param numBypassStages number of bypass stages
* @param dataWidth width of the data out of the functional unit
*/
class FDivSqrtUnit(implicit p: Parameters)
extends FunctionalUnit(
isPipelined = false,
numStages = 1,
numBypassStages = 0,
dataWidth = 65,
needsFcsr = true)
with tile.HasFPUParameters
{
//--------------------------------------
// buffer inputs and upconvert as needed
// provide a one-entry queue to store incoming uops while waiting for the fdiv/fsqrt unit to become available.
val r_buffer_val = RegInit(false.B)
val r_buffer_req = Reg(new FuncUnitReq(dataWidth=65))
val r_buffer_fin = Reg(new tile.FPInput)
val fdiv_decoder = Module(new UOPCodeFDivDecoder)
fdiv_decoder.io.uopc := io.req.bits.uop.uopc
// handle branch kill on queued entry
r_buffer_val := !IsKilledByBranch(io.brupdate, r_buffer_req.uop) && !io.req.bits.kill && r_buffer_val
r_buffer_req.uop.br_mask := GetNewBrMask(io.brupdate, r_buffer_req.uop)
// handle incoming uop, including upconversion as needed, and push back if our input queue is already occupied
io.req.ready := !r_buffer_val
def upconvert(x: UInt) = {
val s2d = Module(new hardfloat.RecFNToRecFN(inExpWidth = 8, inSigWidth = 24, outExpWidth = 11, outSigWidth = 53))
s2d.io.in := x
s2d.io.roundingMode := 0.U
s2d.io.detectTininess := DontCare
s2d.io.out
}
val in1_upconvert = upconvert(unbox(io.req.bits.rs1_data, false.B, Some(tile.FType.S)))
val in2_upconvert = upconvert(unbox(io.req.bits.rs2_data, false.B, Some(tile.FType.S)))
when (io.req.valid && !IsKilledByBranch(io.brupdate, io.req.bits.uop) && !io.req.bits.kill) {
r_buffer_val := true.B
r_buffer_req := io.req.bits
r_buffer_req.uop.br_mask := GetNewBrMask(io.brupdate, io.req.bits.uop)
r_buffer_fin.viewAsSupertype(new tile.FPUCtrlSigs) := fdiv_decoder.io.sigs
r_buffer_fin.rm := Mux(ImmGenRm(io.req.bits.uop.imm_packed) === 7.U, io.fcsr_rm, ImmGenRm(io.req.bits.uop.imm_packed))
r_buffer_fin.typ := 0.U // unused for fdivsqrt
val tag = fdiv_decoder.io.sigs.typeTagIn
r_buffer_fin.in1 := unbox(io.req.bits.rs1_data, tag, Some(tile.FType.D))
r_buffer_fin.in2 := unbox(io.req.bits.rs2_data, tag, Some(tile.FType.D))
when (tag === S) {
r_buffer_fin.in1 := in1_upconvert
r_buffer_fin.in2 := in2_upconvert
}
}
assert (!(r_buffer_val && io.req.valid), "[fdiv] a request is incoming while the buffer is already full.")
//-----------
// fdiv/fsqrt
val divsqrt = Module(new hardfloat.DivSqrtRecF64)
val r_divsqrt_val = RegInit(false.B) // inflight uop?
val r_divsqrt_killed = Reg(Bool()) // has inflight uop been killed?
val r_divsqrt_fin = Reg(new tile.FPInput)
val r_divsqrt_uop = Reg(new MicroOp)
// Need to buffer output until RF writeport is available.
val output_buffer_available = Wire(Bool())
val may_fire_input =
r_buffer_val &&
(r_buffer_fin.div || r_buffer_fin.sqrt) &&
!r_divsqrt_val &&
output_buffer_available
val divsqrt_ready = Mux(divsqrt.io.sqrtOp, divsqrt.io.inReady_sqrt, divsqrt.io.inReady_div)
divsqrt.io.inValid := may_fire_input // must be setup early
divsqrt.io.sqrtOp := r_buffer_fin.sqrt
divsqrt.io.a := r_buffer_fin.in1
divsqrt.io.b := Mux(divsqrt.io.sqrtOp, r_buffer_fin.in1, r_buffer_fin.in2)
divsqrt.io.roundingMode := r_buffer_fin.rm
divsqrt.io.detectTininess := DontCare
r_divsqrt_killed := r_divsqrt_killed || IsKilledByBranch(io.brupdate, r_divsqrt_uop) || io.req.bits.kill
r_divsqrt_uop.br_mask := GetNewBrMask(io.brupdate, r_divsqrt_uop)
when (may_fire_input && divsqrt_ready) {
// Remove entry from the input buffer.
    // We don't have time to kill the divsqrt request, so we must track whether it was killed on entry.
r_buffer_val := false.B
r_divsqrt_val := true.B
r_divsqrt_fin := r_buffer_fin
r_divsqrt_uop := r_buffer_req.uop
r_divsqrt_killed := IsKilledByBranch(io.brupdate, r_buffer_req.uop) || io.req.bits.kill
r_divsqrt_uop.br_mask := GetNewBrMask(io.brupdate, r_buffer_req.uop)
}
//-----------------------------------------
// buffer output and down-convert as needed
val r_out_val = RegInit(false.B)
val r_out_uop = Reg(new MicroOp)
val r_out_flags_double = Reg(Bits())
val r_out_wdata_double = Reg(Bits())
output_buffer_available := !r_out_val
r_out_uop.br_mask := GetNewBrMask(io.brupdate, r_out_uop)
when (io.resp.ready || IsKilledByBranch(io.brupdate, r_out_uop) || io.req.bits.kill) {
r_out_val := false.B
}
when (divsqrt.io.outValid_div || divsqrt.io.outValid_sqrt) {
r_divsqrt_val := false.B
r_out_val := !r_divsqrt_killed && !IsKilledByBranch(io.brupdate, r_divsqrt_uop) && !io.req.bits.kill
r_out_uop := r_divsqrt_uop
r_out_uop.br_mask := GetNewBrMask(io.brupdate, r_divsqrt_uop)
r_out_wdata_double := sanitizeNaN(divsqrt.io.out, tile.FType.D)
r_out_flags_double := divsqrt.io.exceptionFlags
assert (r_divsqrt_val, "[fdiv] a response is being generated for no request.")
}
assert (!(r_out_val && (divsqrt.io.outValid_div || divsqrt.io.outValid_sqrt)),
"[fdiv] Buffered output being overwritten by another output from the fdiv/fsqrt unit.")
val downvert_d2s = Module(new hardfloat.RecFNToRecFN(
inExpWidth = 11, inSigWidth = 53, outExpWidth = 8, outSigWidth = 24))
downvert_d2s.io.in := r_out_wdata_double
downvert_d2s.io.roundingMode := r_divsqrt_fin.rm
downvert_d2s.io.detectTininess := DontCare
val out_flags = r_out_flags_double | Mux(r_divsqrt_fin.typeTagIn === S, downvert_d2s.io.exceptionFlags, 0.U)
io.resp.valid := r_out_val && !IsKilledByBranch(io.brupdate, r_out_uop)
io.resp.bits.uop := r_out_uop
io.resp.bits.data :=
Mux(r_divsqrt_fin.typeTagIn === S,
box(downvert_d2s.io.out, false.B),
box(r_out_wdata_double, true.B))
io.resp.bits.fflags.valid := io.resp.valid
io.resp.bits.fflags.bits.uop := r_out_uop
io.resp.bits.fflags.bits.uop.br_mask := GetNewBrMask(io.brupdate, r_out_uop)
io.resp.bits.fflags.bits.flags := out_flags
}
| module UOPCodeFDivDecoder_1( // @[fdiv.scala:28:7]
input clock, // @[fdiv.scala:28:7]
input reset, // @[fdiv.scala:28:7]
input [6:0] io_uopc, // @[fdiv.scala:31:14]
output io_sigs_ldst, // @[fdiv.scala:31:14]
output io_sigs_wen, // @[fdiv.scala:31:14]
output io_sigs_ren1, // @[fdiv.scala:31:14]
output io_sigs_ren2, // @[fdiv.scala:31:14]
output io_sigs_ren3, // @[fdiv.scala:31:14]
output io_sigs_swap12, // @[fdiv.scala:31:14]
output io_sigs_swap23, // @[fdiv.scala:31:14]
output [1:0] io_sigs_typeTagIn, // @[fdiv.scala:31:14]
output [1:0] io_sigs_typeTagOut, // @[fdiv.scala:31:14]
output io_sigs_fromint, // @[fdiv.scala:31:14]
output io_sigs_toint, // @[fdiv.scala:31:14]
output io_sigs_fastpipe, // @[fdiv.scala:31:14]
output io_sigs_fma, // @[fdiv.scala:31:14]
output io_sigs_div, // @[fdiv.scala:31:14]
output io_sigs_sqrt, // @[fdiv.scala:31:14]
output io_sigs_wflags // @[fdiv.scala:31:14]
);
wire [6:0] io_uopc_0 = io_uopc; // @[fdiv.scala:28:7]
wire _decoder_decoded_orMatrixOutputs_T = 1'h1; // @[pla.scala:114:36]
wire _decoder_decoded_orMatrixOutputs_T_11 = 1'h1; // @[pla.scala:114:36]
wire [1:0] decoder_decoded_orMatrixOutputs_lo_hi_lo = 2'h0; // @[pla.scala:102:36]
wire [1:0] decoder_decoded_orMatrixOutputs_hi_lo_hi = 2'h0; // @[pla.scala:102:36]
wire [1:0] decoder_decoded_orMatrixOutputs_hi_hi_hi = 2'h0; // @[pla.scala:102:36]
wire io_sigs_vec = 1'h0; // @[fdiv.scala:28:7]
wire [6:0] decoder_decoded_plaInput = io_uopc_0; // @[pla.scala:77:22]
wire decoder_0; // @[Decode.scala:50:77]
wire decoder_1; // @[Decode.scala:50:77]
wire decoder_2; // @[Decode.scala:50:77]
wire decoder_3; // @[Decode.scala:50:77]
wire decoder_4; // @[Decode.scala:50:77]
wire decoder_5; // @[Decode.scala:50:77]
wire decoder_6; // @[Decode.scala:50:77]
wire decoder_9; // @[Decode.scala:50:77]
wire decoder_10; // @[Decode.scala:50:77]
wire decoder_11; // @[Decode.scala:50:77]
wire decoder_12; // @[Decode.scala:50:77]
wire decoder_13; // @[Decode.scala:50:77]
wire decoder_14; // @[Decode.scala:50:77]
wire decoder_15; // @[Decode.scala:50:77]
wire io_sigs_ldst_0; // @[fdiv.scala:28:7]
wire io_sigs_wen_0; // @[fdiv.scala:28:7]
wire io_sigs_ren1_0; // @[fdiv.scala:28:7]
wire io_sigs_ren2_0; // @[fdiv.scala:28:7]
wire io_sigs_ren3_0; // @[fdiv.scala:28:7]
wire io_sigs_swap12_0; // @[fdiv.scala:28:7]
wire io_sigs_swap23_0; // @[fdiv.scala:28:7]
wire [1:0] io_sigs_typeTagIn_0; // @[fdiv.scala:28:7]
wire [1:0] io_sigs_typeTagOut_0; // @[fdiv.scala:28:7]
wire io_sigs_fromint_0; // @[fdiv.scala:28:7]
wire io_sigs_toint_0; // @[fdiv.scala:28:7]
wire io_sigs_fastpipe_0; // @[fdiv.scala:28:7]
wire io_sigs_fma_0; // @[fdiv.scala:28:7]
wire io_sigs_div_0; // @[fdiv.scala:28:7]
wire io_sigs_sqrt_0; // @[fdiv.scala:28:7]
wire io_sigs_wflags_0; // @[fdiv.scala:28:7]
wire [6:0] decoder_decoded_invInputs = ~decoder_decoded_plaInput; // @[pla.scala:77:22, :78:21]
wire [15:0] decoder_decoded_invMatrixOutputs; // @[pla.scala:120:37]
wire [15:0] decoder_decoded; // @[pla.scala:81:23]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_0 = decoder_decoded_invInputs[1]; // @[pla.scala:78:21, :91:29]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_1 = decoder_decoded_plaInput[2]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_2_1 = decoder_decoded_plaInput[2]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_2_2 = decoder_decoded_plaInput[2]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_2 = decoder_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_3_1 = decoder_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_3_2 = decoder_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_3 = decoder_decoded_invInputs[4]; // @[pla.scala:78:21, :91:29]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_4_1 = decoder_decoded_invInputs[4]; // @[pla.scala:78:21, :91:29]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_4_2 = decoder_decoded_invInputs[4]; // @[pla.scala:78:21, :91:29]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_1_3 = decoder_decoded_invInputs[4]; // @[pla.scala:78:21, :91:29]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_4 = decoder_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_5_1 = decoder_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_5_2 = decoder_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_2_3 = decoder_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_5 = decoder_decoded_plaInput[6]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_6 = decoder_decoded_plaInput[6]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_6_1 = decoder_decoded_plaInput[6]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_3_3 = decoder_decoded_plaInput[6]; // @[pla.scala:77:22, :90:45]
wire [1:0] decoder_decoded_andMatrixOutputs_lo_hi = {decoder_decoded_andMatrixOutputs_andMatrixInput_3, decoder_decoded_andMatrixOutputs_andMatrixInput_4}; // @[pla.scala:90:45, :91:29, :98:53]
wire [2:0] decoder_decoded_andMatrixOutputs_lo = {decoder_decoded_andMatrixOutputs_lo_hi, decoder_decoded_andMatrixOutputs_andMatrixInput_5}; // @[pla.scala:90:45, :98:53]
wire [1:0] decoder_decoded_andMatrixOutputs_hi_hi = {decoder_decoded_andMatrixOutputs_andMatrixInput_0, decoder_decoded_andMatrixOutputs_andMatrixInput_1}; // @[pla.scala:90:45, :91:29, :98:53]
wire [2:0] decoder_decoded_andMatrixOutputs_hi = {decoder_decoded_andMatrixOutputs_hi_hi, decoder_decoded_andMatrixOutputs_andMatrixInput_2}; // @[pla.scala:91:29, :98:53]
wire [5:0] _decoder_decoded_andMatrixOutputs_T = {decoder_decoded_andMatrixOutputs_hi, decoder_decoded_andMatrixOutputs_lo}; // @[pla.scala:98:53]
wire decoder_decoded_andMatrixOutputs_3_2 = &_decoder_decoded_andMatrixOutputs_T; // @[pla.scala:98:{53,70}]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_0_1 = decoder_decoded_invInputs[0]; // @[pla.scala:78:21, :91:29]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_1_1 = decoder_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_1_2 = decoder_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45]
wire [1:0] decoder_decoded_andMatrixOutputs_lo_hi_1 = {decoder_decoded_andMatrixOutputs_andMatrixInput_4_1, decoder_decoded_andMatrixOutputs_andMatrixInput_5_1}; // @[pla.scala:90:45, :91:29, :98:53]
wire [2:0] decoder_decoded_andMatrixOutputs_lo_1 = {decoder_decoded_andMatrixOutputs_lo_hi_1, decoder_decoded_andMatrixOutputs_andMatrixInput_6}; // @[pla.scala:90:45, :98:53]
wire [1:0] decoder_decoded_andMatrixOutputs_hi_lo = {decoder_decoded_andMatrixOutputs_andMatrixInput_2_1, decoder_decoded_andMatrixOutputs_andMatrixInput_3_1}; // @[pla.scala:90:45, :91:29, :98:53]
wire [1:0] decoder_decoded_andMatrixOutputs_hi_hi_1 = {decoder_decoded_andMatrixOutputs_andMatrixInput_0_1, decoder_decoded_andMatrixOutputs_andMatrixInput_1_1}; // @[pla.scala:90:45, :91:29, :98:53]
wire [3:0] decoder_decoded_andMatrixOutputs_hi_1 = {decoder_decoded_andMatrixOutputs_hi_hi_1, decoder_decoded_andMatrixOutputs_hi_lo}; // @[pla.scala:98:53]
wire [6:0] _decoder_decoded_andMatrixOutputs_T_1 = {decoder_decoded_andMatrixOutputs_hi_1, decoder_decoded_andMatrixOutputs_lo_1}; // @[pla.scala:98:53]
wire decoder_decoded_andMatrixOutputs_4_2 = &_decoder_decoded_andMatrixOutputs_T_1; // @[pla.scala:98:{53,70}]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_0_2 = decoder_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45]
wire [1:0] decoder_decoded_andMatrixOutputs_lo_hi_2 = {decoder_decoded_andMatrixOutputs_andMatrixInput_4_2, decoder_decoded_andMatrixOutputs_andMatrixInput_5_2}; // @[pla.scala:90:45, :91:29, :98:53]
wire [2:0] decoder_decoded_andMatrixOutputs_lo_2 = {decoder_decoded_andMatrixOutputs_lo_hi_2, decoder_decoded_andMatrixOutputs_andMatrixInput_6_1}; // @[pla.scala:90:45, :98:53]
wire [1:0] decoder_decoded_andMatrixOutputs_hi_lo_1 = {decoder_decoded_andMatrixOutputs_andMatrixInput_2_2, decoder_decoded_andMatrixOutputs_andMatrixInput_3_2}; // @[pla.scala:90:45, :91:29, :98:53]
wire [1:0] decoder_decoded_andMatrixOutputs_hi_hi_2 = {decoder_decoded_andMatrixOutputs_andMatrixInput_0_2, decoder_decoded_andMatrixOutputs_andMatrixInput_1_2}; // @[pla.scala:90:45, :98:53]
wire [3:0] decoder_decoded_andMatrixOutputs_hi_2 = {decoder_decoded_andMatrixOutputs_hi_hi_2, decoder_decoded_andMatrixOutputs_hi_lo_1}; // @[pla.scala:98:53]
wire [6:0] _decoder_decoded_andMatrixOutputs_T_2 = {decoder_decoded_andMatrixOutputs_hi_2, decoder_decoded_andMatrixOutputs_lo_2}; // @[pla.scala:98:53]
wire decoder_decoded_andMatrixOutputs_2_2 = &_decoder_decoded_andMatrixOutputs_T_2; // @[pla.scala:98:{53,70}]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_0_3 = decoder_decoded_plaInput[3]; // @[pla.scala:77:22, :90:45]
wire [1:0] decoder_decoded_andMatrixOutputs_lo_3 = {decoder_decoded_andMatrixOutputs_andMatrixInput_2_3, decoder_decoded_andMatrixOutputs_andMatrixInput_3_3}; // @[pla.scala:90:45, :98:53]
wire [1:0] decoder_decoded_andMatrixOutputs_hi_3 = {decoder_decoded_andMatrixOutputs_andMatrixInput_0_3, decoder_decoded_andMatrixOutputs_andMatrixInput_1_3}; // @[pla.scala:90:45, :91:29, :98:53]
wire [3:0] _decoder_decoded_andMatrixOutputs_T_3 = {decoder_decoded_andMatrixOutputs_hi_3, decoder_decoded_andMatrixOutputs_lo_3}; // @[pla.scala:98:53]
wire decoder_decoded_andMatrixOutputs_0_2 = &_decoder_decoded_andMatrixOutputs_T_3; // @[pla.scala:98:{53,70}]
wire [1:0] _decoder_decoded_orMatrixOutputs_T_1 = {decoder_decoded_andMatrixOutputs_2_2, decoder_decoded_andMatrixOutputs_0_2}; // @[pla.scala:98:70, :114:19]
wire _decoder_decoded_orMatrixOutputs_T_2 = |_decoder_decoded_orMatrixOutputs_T_1; // @[pla.scala:114:{19,36}]
wire [1:0] _GEN = {decoder_decoded_andMatrixOutputs_3_2, decoder_decoded_andMatrixOutputs_4_2}; // @[pla.scala:98:70, :114:19]
wire [1:0] _decoder_decoded_orMatrixOutputs_T_3; // @[pla.scala:114:19]
assign _decoder_decoded_orMatrixOutputs_T_3 = _GEN; // @[pla.scala:114:19]
wire [1:0] _decoder_decoded_orMatrixOutputs_T_9; // @[pla.scala:114:19]
assign _decoder_decoded_orMatrixOutputs_T_9 = _GEN; // @[pla.scala:114:19]
wire _decoder_decoded_orMatrixOutputs_T_4 = |_decoder_decoded_orMatrixOutputs_T_3; // @[pla.scala:114:{19,36}]
wire [1:0] _GEN_0 = {decoder_decoded_andMatrixOutputs_4_2, decoder_decoded_andMatrixOutputs_0_2}; // @[pla.scala:98:70, :114:19]
wire [1:0] _decoder_decoded_orMatrixOutputs_T_5; // @[pla.scala:114:19]
assign _decoder_decoded_orMatrixOutputs_T_5 = _GEN_0; // @[pla.scala:114:19]
wire [1:0] _decoder_decoded_orMatrixOutputs_T_7; // @[pla.scala:114:19]
assign _decoder_decoded_orMatrixOutputs_T_7 = _GEN_0; // @[pla.scala:114:19]
wire _decoder_decoded_orMatrixOutputs_T_6 = |_decoder_decoded_orMatrixOutputs_T_5; // @[pla.scala:114:{19,36}]
wire _decoder_decoded_orMatrixOutputs_T_8 = |_decoder_decoded_orMatrixOutputs_T_7; // @[pla.scala:114:{19,36}]
wire _decoder_decoded_orMatrixOutputs_T_10 = |_decoder_decoded_orMatrixOutputs_T_9; // @[pla.scala:114:{19,36}]
wire [1:0] decoder_decoded_orMatrixOutputs_lo_lo_lo = {_decoder_decoded_orMatrixOutputs_T_2, 1'h1}; // @[pla.scala:102:36, :114:36]
wire [1:0] decoder_decoded_orMatrixOutputs_lo_lo_hi = {1'h0, _decoder_decoded_orMatrixOutputs_T_4}; // @[pla.scala:102:36, :114:36]
wire [3:0] decoder_decoded_orMatrixOutputs_lo_lo = {decoder_decoded_orMatrixOutputs_lo_lo_hi, decoder_decoded_orMatrixOutputs_lo_lo_lo}; // @[pla.scala:102:36]
wire [1:0] decoder_decoded_orMatrixOutputs_lo_hi_hi = {_decoder_decoded_orMatrixOutputs_T_6, 1'h0}; // @[pla.scala:102:36, :114:36]
wire [3:0] decoder_decoded_orMatrixOutputs_lo_hi = {decoder_decoded_orMatrixOutputs_lo_hi_hi, 2'h0}; // @[pla.scala:102:36]
wire [7:0] decoder_decoded_orMatrixOutputs_lo = {decoder_decoded_orMatrixOutputs_lo_hi, decoder_decoded_orMatrixOutputs_lo_lo}; // @[pla.scala:102:36]
wire [1:0] decoder_decoded_orMatrixOutputs_hi_lo_lo = {1'h0, _decoder_decoded_orMatrixOutputs_T_8}; // @[pla.scala:102:36, :114:36]
wire [3:0] decoder_decoded_orMatrixOutputs_hi_lo = {2'h0, decoder_decoded_orMatrixOutputs_hi_lo_lo}; // @[pla.scala:102:36]
wire [1:0] decoder_decoded_orMatrixOutputs_hi_hi_lo = {1'h1, _decoder_decoded_orMatrixOutputs_T_10}; // @[pla.scala:102:36, :114:36]
wire [3:0] decoder_decoded_orMatrixOutputs_hi_hi = {2'h0, decoder_decoded_orMatrixOutputs_hi_hi_lo}; // @[pla.scala:102:36]
wire [7:0] decoder_decoded_orMatrixOutputs_hi = {decoder_decoded_orMatrixOutputs_hi_hi, decoder_decoded_orMatrixOutputs_hi_lo}; // @[pla.scala:102:36]
wire [15:0] decoder_decoded_orMatrixOutputs = {decoder_decoded_orMatrixOutputs_hi, decoder_decoded_orMatrixOutputs_lo}; // @[pla.scala:102:36]
wire _decoder_decoded_invMatrixOutputs_T = decoder_decoded_orMatrixOutputs[0]; // @[pla.scala:102:36, :124:31]
wire _decoder_decoded_invMatrixOutputs_T_1 = decoder_decoded_orMatrixOutputs[1]; // @[pla.scala:102:36, :124:31]
wire _decoder_decoded_invMatrixOutputs_T_2 = decoder_decoded_orMatrixOutputs[2]; // @[pla.scala:102:36, :124:31]
wire _decoder_decoded_invMatrixOutputs_T_3 = decoder_decoded_orMatrixOutputs[3]; // @[pla.scala:102:36, :124:31]
wire _decoder_decoded_invMatrixOutputs_T_4 = decoder_decoded_orMatrixOutputs[4]; // @[pla.scala:102:36, :124:31]
wire _decoder_decoded_invMatrixOutputs_T_5 = decoder_decoded_orMatrixOutputs[5]; // @[pla.scala:102:36, :124:31]
wire _decoder_decoded_invMatrixOutputs_T_6 = decoder_decoded_orMatrixOutputs[6]; // @[pla.scala:102:36, :124:31]
wire _decoder_decoded_invMatrixOutputs_T_7 = decoder_decoded_orMatrixOutputs[7]; // @[pla.scala:102:36, :124:31]
wire _decoder_decoded_invMatrixOutputs_T_8 = decoder_decoded_orMatrixOutputs[8]; // @[pla.scala:102:36, :124:31]
wire _decoder_decoded_invMatrixOutputs_T_9 = decoder_decoded_orMatrixOutputs[9]; // @[pla.scala:102:36, :124:31]
wire _decoder_decoded_invMatrixOutputs_T_10 = decoder_decoded_orMatrixOutputs[10]; // @[pla.scala:102:36, :124:31]
wire _decoder_decoded_invMatrixOutputs_T_11 = decoder_decoded_orMatrixOutputs[11]; // @[pla.scala:102:36, :124:31]
wire _decoder_decoded_invMatrixOutputs_T_12 = decoder_decoded_orMatrixOutputs[12]; // @[pla.scala:102:36, :124:31]
wire _decoder_decoded_invMatrixOutputs_T_13 = decoder_decoded_orMatrixOutputs[13]; // @[pla.scala:102:36, :124:31]
wire _decoder_decoded_invMatrixOutputs_T_14 = decoder_decoded_orMatrixOutputs[14]; // @[pla.scala:102:36, :124:31]
wire _decoder_decoded_invMatrixOutputs_T_15 = decoder_decoded_orMatrixOutputs[15]; // @[pla.scala:102:36, :124:31]
wire [1:0] decoder_decoded_invMatrixOutputs_lo_lo_lo = {_decoder_decoded_invMatrixOutputs_T_1, _decoder_decoded_invMatrixOutputs_T}; // @[pla.scala:120:37, :124:31]
wire [1:0] decoder_decoded_invMatrixOutputs_lo_lo_hi = {_decoder_decoded_invMatrixOutputs_T_3, _decoder_decoded_invMatrixOutputs_T_2}; // @[pla.scala:120:37, :124:31]
wire [3:0] decoder_decoded_invMatrixOutputs_lo_lo = {decoder_decoded_invMatrixOutputs_lo_lo_hi, decoder_decoded_invMatrixOutputs_lo_lo_lo}; // @[pla.scala:120:37]
wire [1:0] decoder_decoded_invMatrixOutputs_lo_hi_lo = {_decoder_decoded_invMatrixOutputs_T_5, _decoder_decoded_invMatrixOutputs_T_4}; // @[pla.scala:120:37, :124:31]
wire [1:0] decoder_decoded_invMatrixOutputs_lo_hi_hi = {_decoder_decoded_invMatrixOutputs_T_7, _decoder_decoded_invMatrixOutputs_T_6}; // @[pla.scala:120:37, :124:31]
wire [3:0] decoder_decoded_invMatrixOutputs_lo_hi = {decoder_decoded_invMatrixOutputs_lo_hi_hi, decoder_decoded_invMatrixOutputs_lo_hi_lo}; // @[pla.scala:120:37]
wire [7:0] decoder_decoded_invMatrixOutputs_lo = {decoder_decoded_invMatrixOutputs_lo_hi, decoder_decoded_invMatrixOutputs_lo_lo}; // @[pla.scala:120:37]
wire [1:0] decoder_decoded_invMatrixOutputs_hi_lo_lo = {_decoder_decoded_invMatrixOutputs_T_9, _decoder_decoded_invMatrixOutputs_T_8}; // @[pla.scala:120:37, :124:31]
wire [1:0] decoder_decoded_invMatrixOutputs_hi_lo_hi = {_decoder_decoded_invMatrixOutputs_T_11, _decoder_decoded_invMatrixOutputs_T_10}; // @[pla.scala:120:37, :124:31]
wire [3:0] decoder_decoded_invMatrixOutputs_hi_lo = {decoder_decoded_invMatrixOutputs_hi_lo_hi, decoder_decoded_invMatrixOutputs_hi_lo_lo}; // @[pla.scala:120:37]
wire [1:0] decoder_decoded_invMatrixOutputs_hi_hi_lo = {_decoder_decoded_invMatrixOutputs_T_13, _decoder_decoded_invMatrixOutputs_T_12}; // @[pla.scala:120:37, :124:31]
wire [1:0] decoder_decoded_invMatrixOutputs_hi_hi_hi = {_decoder_decoded_invMatrixOutputs_T_15, _decoder_decoded_invMatrixOutputs_T_14}; // @[pla.scala:120:37, :124:31]
wire [3:0] decoder_decoded_invMatrixOutputs_hi_hi = {decoder_decoded_invMatrixOutputs_hi_hi_hi, decoder_decoded_invMatrixOutputs_hi_hi_lo}; // @[pla.scala:120:37]
wire [7:0] decoder_decoded_invMatrixOutputs_hi = {decoder_decoded_invMatrixOutputs_hi_hi, decoder_decoded_invMatrixOutputs_hi_lo}; // @[pla.scala:120:37]
assign decoder_decoded_invMatrixOutputs = {decoder_decoded_invMatrixOutputs_hi, decoder_decoded_invMatrixOutputs_lo}; // @[pla.scala:120:37]
assign decoder_decoded = decoder_decoded_invMatrixOutputs; // @[pla.scala:81:23, :120:37]
assign decoder_0 = decoder_decoded[15]; // @[pla.scala:81:23]
assign io_sigs_ldst_0 = decoder_0; // @[Decode.scala:50:77]
assign decoder_1 = decoder_decoded[14]; // @[pla.scala:81:23]
assign io_sigs_wen_0 = decoder_1; // @[Decode.scala:50:77]
assign decoder_2 = decoder_decoded[13]; // @[pla.scala:81:23]
assign io_sigs_ren1_0 = decoder_2; // @[Decode.scala:50:77]
assign decoder_3 = decoder_decoded[12]; // @[pla.scala:81:23]
assign io_sigs_ren2_0 = decoder_3; // @[Decode.scala:50:77]
assign decoder_4 = decoder_decoded[11]; // @[pla.scala:81:23]
assign io_sigs_ren3_0 = decoder_4; // @[Decode.scala:50:77]
assign decoder_5 = decoder_decoded[10]; // @[pla.scala:81:23]
assign io_sigs_swap12_0 = decoder_5; // @[Decode.scala:50:77]
assign decoder_6 = decoder_decoded[9]; // @[pla.scala:81:23]
assign io_sigs_swap23_0 = decoder_6; // @[Decode.scala:50:77]
wire decoder_7 = decoder_decoded[8]; // @[pla.scala:81:23]
wire decoder_8 = decoder_decoded[7]; // @[pla.scala:81:23]
assign decoder_9 = decoder_decoded[6]; // @[pla.scala:81:23]
assign io_sigs_fromint_0 = decoder_9; // @[Decode.scala:50:77]
assign decoder_10 = decoder_decoded[5]; // @[pla.scala:81:23]
assign io_sigs_toint_0 = decoder_10; // @[Decode.scala:50:77]
assign decoder_11 = decoder_decoded[4]; // @[pla.scala:81:23]
assign io_sigs_fastpipe_0 = decoder_11; // @[Decode.scala:50:77]
assign decoder_12 = decoder_decoded[3]; // @[pla.scala:81:23]
assign io_sigs_fma_0 = decoder_12; // @[Decode.scala:50:77]
assign decoder_13 = decoder_decoded[2]; // @[pla.scala:81:23]
assign io_sigs_div_0 = decoder_13; // @[Decode.scala:50:77]
assign decoder_14 = decoder_decoded[1]; // @[pla.scala:81:23]
assign io_sigs_sqrt_0 = decoder_14; // @[Decode.scala:50:77]
assign decoder_15 = decoder_decoded[0]; // @[pla.scala:81:23]
assign io_sigs_wflags_0 = decoder_15; // @[Decode.scala:50:77]
assign io_sigs_typeTagIn_0 = {1'h0, decoder_7}; // @[Decode.scala:50:77]
assign io_sigs_typeTagOut_0 = {1'h0, decoder_8}; // @[Decode.scala:50:77]
assign io_sigs_ldst = io_sigs_ldst_0; // @[fdiv.scala:28:7]
assign io_sigs_wen = io_sigs_wen_0; // @[fdiv.scala:28:7]
assign io_sigs_ren1 = io_sigs_ren1_0; // @[fdiv.scala:28:7]
assign io_sigs_ren2 = io_sigs_ren2_0; // @[fdiv.scala:28:7]
assign io_sigs_ren3 = io_sigs_ren3_0; // @[fdiv.scala:28:7]
assign io_sigs_swap12 = io_sigs_swap12_0; // @[fdiv.scala:28:7]
assign io_sigs_swap23 = io_sigs_swap23_0; // @[fdiv.scala:28:7]
assign io_sigs_typeTagIn = io_sigs_typeTagIn_0; // @[fdiv.scala:28:7]
assign io_sigs_typeTagOut = io_sigs_typeTagOut_0; // @[fdiv.scala:28:7]
assign io_sigs_fromint = io_sigs_fromint_0; // @[fdiv.scala:28:7]
assign io_sigs_toint = io_sigs_toint_0; // @[fdiv.scala:28:7]
assign io_sigs_fastpipe = io_sigs_fastpipe_0; // @[fdiv.scala:28:7]
assign io_sigs_fma = io_sigs_fma_0; // @[fdiv.scala:28:7]
assign io_sigs_div = io_sigs_div_0; // @[fdiv.scala:28:7]
assign io_sigs_sqrt = io_sigs_sqrt_0; // @[fdiv.scala:28:7]
assign io_sigs_wflags = io_sigs_wflags_0; // @[fdiv.scala:28:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File UnsafeAXI4ToTL.scala:
package ara
import chisel3._
import chisel3.util._
import freechips.rocketchip.amba._
import freechips.rocketchip.amba.axi4._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
class ReorderData(val dataWidth: Int, val respWidth: Int, val userFields: Seq[BundleFieldBase]) extends Bundle {
val data = UInt(dataWidth.W)
val resp = UInt(respWidth.W)
val last = Bool()
val user = BundleMap(userFields)
}
/** Parameters for [[BaseReservableListBuffer]] and all child classes.
*
* @param numEntries Total number of elements that can be stored in the 'data' RAM
* @param numLists Maximum number of linked lists
* @param numBeats Maximum number of beats per entry
*/
case class ReservableListBufferParameters(numEntries: Int, numLists: Int, numBeats: Int) {
// Avoid zero-width wires when we call 'log2Ceil'
val entryBits = if (numEntries == 1) 1 else log2Ceil(numEntries)
val listBits = if (numLists == 1) 1 else log2Ceil(numLists)
val beatBits = if (numBeats == 1) 1 else log2Ceil(numBeats)
}
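// For illustration (hypothetical parameter values, not from any config): numEntries = 8,
// numLists = 16 and numBeats = 1 give entryBits = 3, listBits = 4 and beatBits = 1; the
// single-element cases are clamped to 1 so that downstream UInt(<n>Bits.W) fields never
// end up zero-width.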
case class UnsafeAXI4ToTLNode(numTlTxns: Int, wcorrupt: Boolean)(implicit valName: ValName)
extends MixedAdapterNode(AXI4Imp, TLImp)(
dFn = { case mp =>
TLMasterPortParameters.v2(
masters = mp.masters.zipWithIndex.map { case (m, i) =>
// Support 'numTlTxns' read requests and 'numTlTxns' write requests at once.
val numSourceIds = numTlTxns * 2
TLMasterParameters.v2(
name = m.name,
sourceId = IdRange(i * numSourceIds, (i + 1) * numSourceIds),
nodePath = m.nodePath
)
},
echoFields = mp.echoFields,
requestFields = AMBAProtField() +: mp.requestFields,
responseKeys = mp.responseKeys
)
},
uFn = { mp =>
AXI4SlavePortParameters(
slaves = mp.managers.map { m =>
val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits))
AXI4SlaveParameters(
address = m.address,
resources = m.resources,
regionType = m.regionType,
executable = m.executable,
nodePath = m.nodePath,
supportsWrite = m.supportsPutPartial.intersect(maxXfer),
supportsRead = m.supportsGet.intersect(maxXfer),
interleavedId = Some(0) // TL2 never interleaves D beats
)
},
beatBytes = mp.beatBytes,
minLatency = mp.minLatency,
responseFields = mp.responseFields,
requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt)
)
}
)
class UnsafeAXI4ToTL(numTlTxns: Int, wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule {
require(numTlTxns >= 1)
require(isPow2(numTlTxns), s"Number of TileLink transactions ($numTlTxns) must be a power of 2")
val node = UnsafeAXI4ToTLNode(numTlTxns, wcorrupt)
lazy val module = new LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
edgeIn.master.masters.foreach { m =>
require(m.aligned, "AXI4ToTL requires aligned requests")
}
val numIds = edgeIn.master.endId
val beatBytes = edgeOut.slave.beatBytes
val maxTransfer = edgeOut.slave.maxTransfer
val maxBeats = maxTransfer / beatBytes
// Look for an Error device to redirect bad requests
val errorDevs = edgeOut.slave.managers.filter(_.nodePath.last.lazyModule.className == "TLError")
require(!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. One must be instantiated.")
val errorDev = errorDevs.maxBy(_.maxTransfer)
val errorDevAddr = errorDev.address.head.base
require(
errorDev.supportsPutPartial.contains(maxTransfer),
s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support $maxTransfer"
)
require(
errorDev.supportsGet.contains(maxTransfer),
s"Error device supports ${errorDev.supportsGet} Get but must support $maxTransfer"
)
// All of the read-response reordering logic.
val listBufData = new ReorderData(beatBytes * 8, edgeIn.bundle.respBits, out.d.bits.user.fields)
val listBufParams = ReservableListBufferParameters(numTlTxns, numIds, maxBeats)
val listBuffer = if (numTlTxns > 1) {
Module(new ReservableListBuffer(listBufData, listBufParams))
} else {
Module(new PassthroughListBuffer(listBufData, listBufParams))
}
// To differentiate between read and write transaction IDs, we will set the MSB of the TileLink 'source' field to
// 0 for read requests and 1 for write requests.
val isReadSourceBit = 0.U(1.W)
val isWriteSourceBit = 1.U(1.W)
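      // For example (hypothetical sizing): with numTlTxns = 8 this adapter emits 16 distinct
      // TileLink source IDs, Cat(0, index) = 0-7 for reads and Cat(1, index) = 8-15 for writes,
      // matching the numTlTxns * 2 sources reserved per master in the node parameters above.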
/* Read request logic */
val rOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val rBytes1 = in.ar.bits.bytes1()
val rSize = OH1ToUInt(rBytes1)
val rOk = edgeOut.slave.supportsGetSafe(in.ar.bits.addr, rSize)
val rId = if (numTlTxns > 1) {
Cat(isReadSourceBit, listBuffer.ioReservedIndex)
} else {
isReadSourceBit
}
val rAddr = Mux(rOk, in.ar.bits.addr, errorDevAddr.U | in.ar.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Indicates if there are still valid TileLink source IDs left to use.
val canIssueR = listBuffer.ioReserve.ready
listBuffer.ioReserve.bits := in.ar.bits.id
listBuffer.ioReserve.valid := in.ar.valid && rOut.ready
in.ar.ready := rOut.ready && canIssueR
rOut.valid := in.ar.valid && canIssueR
rOut.bits :<= edgeOut.Get(rId, rAddr, rSize)._2
rOut.bits.user :<= in.ar.bits.user
rOut.bits.user.lift(AMBAProt).foreach { rProt =>
rProt.privileged := in.ar.bits.prot(0)
rProt.secure := !in.ar.bits.prot(1)
rProt.fetch := in.ar.bits.prot(2)
rProt.bufferable := in.ar.bits.cache(0)
rProt.modifiable := in.ar.bits.cache(1)
rProt.readalloc := in.ar.bits.cache(2)
rProt.writealloc := in.ar.bits.cache(3)
}
/* Write request logic */
// Strip off the MSB, which identifies the transaction as read vs write.
val strippedResponseSourceId = if (numTlTxns > 1) {
out.d.bits.source((out.d.bits.source).getWidth - 2, 0)
} else {
        // When there's only 1 TileLink transaction allowed for read/write, this field is always 0.
0.U(1.W)
}
// Track when a write request burst is in progress.
val writeBurstBusy = RegInit(false.B)
when(in.w.fire) {
writeBurstBusy := !in.w.bits.last
}
val usedWriteIds = RegInit(0.U(numTlTxns.W))
val canIssueW = !usedWriteIds.andR
val usedWriteIdsSet = WireDefault(0.U(numTlTxns.W))
val usedWriteIdsClr = WireDefault(0.U(numTlTxns.W))
usedWriteIds := (usedWriteIds & ~usedWriteIdsClr) | usedWriteIdsSet
// Since write responses can show up in the middle of a write burst, we need to ensure the write burst ID doesn't
// change mid-burst.
val freeWriteIdOHRaw = Wire(UInt(numTlTxns.W))
val freeWriteIdOH = freeWriteIdOHRaw holdUnless !writeBurstBusy
val freeWriteIdIndex = OHToUInt(freeWriteIdOH)
freeWriteIdOHRaw := ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds
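      // The expression above isolates the lowest free write ID as a one-hot value:
      // leftOR(~usedWriteIds) smears the lowest clear bit of usedWriteIds toward the MSB,
      // shifting that left by one and inverting keeps only the positions at or below it,
      // and the final AND with ~usedWriteIds leaves exactly that lowest clear bit set.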
val wOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val wBytes1 = in.aw.bits.bytes1()
val wSize = OH1ToUInt(wBytes1)
val wOk = edgeOut.slave.supportsPutPartialSafe(in.aw.bits.addr, wSize)
val wId = if (numTlTxns > 1) {
Cat(isWriteSourceBit, freeWriteIdIndex)
} else {
isWriteSourceBit
}
val wAddr = Mux(wOk, in.aw.bits.addr, errorDevAddr.U | in.aw.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Here, we're taking advantage of the Irrevocable behavior of AXI4 (once 'valid' is asserted it must remain
// asserted until the handshake occurs). We will only accept W-channel beats when we have a valid AW beat, but
// the AW-channel beat won't fire until the final W-channel beat fires. So, we have stable address/size/strb
// bits during a W-channel burst.
in.aw.ready := wOut.ready && in.w.valid && in.w.bits.last && canIssueW
in.w.ready := wOut.ready && in.aw.valid && canIssueW
wOut.valid := in.aw.valid && in.w.valid && canIssueW
wOut.bits :<= edgeOut.Put(wId, wAddr, wSize, in.w.bits.data, in.w.bits.strb)._2
in.w.bits.user.lift(AMBACorrupt).foreach { wOut.bits.corrupt := _ }
wOut.bits.user :<= in.aw.bits.user
wOut.bits.user.lift(AMBAProt).foreach { wProt =>
wProt.privileged := in.aw.bits.prot(0)
wProt.secure := !in.aw.bits.prot(1)
wProt.fetch := in.aw.bits.prot(2)
wProt.bufferable := in.aw.bits.cache(0)
wProt.modifiable := in.aw.bits.cache(1)
wProt.readalloc := in.aw.bits.cache(2)
wProt.writealloc := in.aw.bits.cache(3)
}
// Merge the AXI4 read/write requests into the TL-A channel.
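// Note (added for clarity, not in the original source): the UInt in each (UInt, DecoupledIO) pair
// below appears to be the beats-minus-one count TLArbiter uses to hold its grant across a multi-beat
// message: Gets are single-beat on the A channel (0.U), while a Put holds the grant for the remaining
// in.aw.bits.len beats of the AXI burst.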
TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, rOut), (in.aw.bits.len, wOut))
/* Read/write response logic */
val okB = Wire(Irrevocable(new AXI4BundleB(edgeIn.bundle)))
val okR = Wire(Irrevocable(new AXI4BundleR(edgeIn.bundle)))
val dResp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY)
val dHasData = edgeOut.hasData(out.d.bits)
val (_dFirst, dLast, _dDone, dCount) = edgeOut.count(out.d)
val dNumBeats1 = edgeOut.numBeats1(out.d.bits)
// Handle cases where the write ack (D-channel response) arrives before the write request has finished sending
val writeEarlyAck = (UIntToOH(strippedResponseSourceId) & usedWriteIds) === 0.U
out.d.ready := Mux(dHasData, listBuffer.ioResponse.ready, okB.ready && !writeEarlyAck)
listBuffer.ioDataOut.ready := okR.ready
okR.valid := listBuffer.ioDataOut.valid
okB.valid := out.d.valid && !dHasData && !writeEarlyAck
listBuffer.ioResponse.valid := out.d.valid && dHasData
listBuffer.ioResponse.bits.index := strippedResponseSourceId
listBuffer.ioResponse.bits.data.data := out.d.bits.data
listBuffer.ioResponse.bits.data.resp := dResp
listBuffer.ioResponse.bits.data.last := dLast
listBuffer.ioResponse.bits.data.user :<= out.d.bits.user
listBuffer.ioResponse.bits.count := dCount
listBuffer.ioResponse.bits.numBeats1 := dNumBeats1
okR.bits.id := listBuffer.ioDataOut.bits.listIndex
okR.bits.data := listBuffer.ioDataOut.bits.payload.data
okR.bits.resp := listBuffer.ioDataOut.bits.payload.resp
okR.bits.last := listBuffer.ioDataOut.bits.payload.last
okR.bits.user :<= listBuffer.ioDataOut.bits.payload.user
// Upon the final beat in a write request, record a mapping from TileLink source ID to AXI write ID. Upon a write
// response, mark the write transaction as complete.
val writeIdMap = Mem(numTlTxns, UInt(log2Ceil(numIds).W))
val writeResponseId = writeIdMap.read(strippedResponseSourceId)
when(wOut.fire) {
writeIdMap.write(freeWriteIdIndex, in.aw.bits.id)
}
when(edgeOut.done(wOut)) {
usedWriteIdsSet := freeWriteIdOH
}
when(okB.fire) {
usedWriteIdsClr := UIntToOH(strippedResponseSourceId, numTlTxns)
}
okB.bits.id := writeResponseId
okB.bits.resp := dResp
okB.bits.user :<= out.d.bits.user
// AXI4 needs irrevocable behaviour
in.r <> Queue.irrevocable(okR, 1, flow = true)
in.b <> Queue.irrevocable(okB, 1, flow = true)
// Unused channels
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
/* Alignment constraints. The AXI4Fragmenter should guarantee all of these constraints. */
def checkRequest[T <: AXI4BundleA](a: IrrevocableIO[T], reqType: String): Unit = {
val lReqType = reqType.toLowerCase
when(a.valid) {
assert(a.bits.len < maxBeats.U, s"$reqType burst length (%d) must be less than $maxBeats", a.bits.len + 1.U)
// Narrow transfers and FIXED bursts must be single-beat bursts.
when(a.bits.len =/= 0.U) {
assert(
a.bits.size === log2Ceil(beatBytes).U,
s"Narrow $lReqType transfers (%d < $beatBytes bytes) can't be multi-beat bursts (%d beats)",
1.U << a.bits.size,
a.bits.len + 1.U
)
assert(
a.bits.burst =/= AXI4Parameters.BURST_FIXED,
s"Fixed $lReqType bursts can't be multi-beat bursts (%d beats)",
a.bits.len + 1.U
)
}
// Furthermore, the transfer size (a.bits.bytes1() + 1.U) must be naturally-aligned to the address (in
// particular, during both WRAP and INCR bursts), but this constraint is already checked by TileLink
// Monitors. Note that this alignment requirement means that WRAP bursts are identical to INCR bursts.
}
}
checkRequest(in.ar, "Read")
checkRequest(in.aw, "Write")
}
}
}
object UnsafeAXI4ToTL {
def apply(numTlTxns: Int = 1, wcorrupt: Boolean = true)(implicit p: Parameters) = {
val axi42tl = LazyModule(new UnsafeAXI4ToTL(numTlTxns, wcorrupt))
axi42tl.node
}
}
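// Hypothetical usage sketch (not part of the original file): UnsafeAXI4ToTL.apply returns a diplomatic
// node, so a typical connection in a LazyModule graph might look like the commented line below. The
// node names (tlXbar, axiMaster) are placeholders, and the AXI4Fragmenter stage reflects the alignment
// guarantees this adapter assumes (see the checkRequest() assertions above).
//
//   tlXbar.node := UnsafeAXI4ToTL(numTlTxns = 4) := AXI4Fragmenter() := axiMaster.node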
/* ReservableListBuffer logic, and associated classes. */
class ResponsePayload[T <: Data](val data: T, val params: ReservableListBufferParameters) extends Bundle {
val index = UInt(params.entryBits.W)
val count = UInt(params.beatBits.W)
val numBeats1 = UInt(params.beatBits.W)
}
class DataOutPayload[T <: Data](val payload: T, val params: ReservableListBufferParameters) extends Bundle {
val listIndex = UInt(params.listBits.W)
}
/** Abstract base class to unify [[ReservableListBuffer]] and [[PassthroughListBuffer]]. */
abstract class BaseReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends Module {
require(params.numEntries > 0)
require(params.numLists > 0)
val ioReserve = IO(Flipped(Decoupled(UInt(params.listBits.W))))
val ioReservedIndex = IO(Output(UInt(params.entryBits.W)))
val ioResponse = IO(Flipped(Decoupled(new ResponsePayload(gen, params))))
val ioDataOut = IO(Decoupled(new DataOutPayload(gen, params)))
}
/** A modified version of 'ListBuffer' from 'sifive/block-inclusivecache-sifive'. This module forces users to reserve
* linked list entries (through the 'ioReserve' port) before writing data into those linked lists (through the
* 'ioResponse' port). Each response is tagged to indicate which linked list it is written into. The responses for a
* given linked list can come back out-of-order, but they will be read out through the 'ioDataOut' port in-order.
*
* ==Constructor==
* @param gen Chisel type of linked list data element
* @param params Other parameters
*
* ==Module IO==
* @param ioReserve Index of list to reserve a new element in
* @param ioReservedIndex Index of the entry that was reserved in the linked list, valid when 'ioReserve.fire'
* @param ioResponse Payload containing response data and linked-list-entry index
* @param ioDataOut Payload containing data read from response linked list and linked list index
*/
class ReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
val valid = RegInit(0.U(params.numLists.W))
val head = Mem(params.numLists, UInt(params.entryBits.W))
val tail = Mem(params.numLists, UInt(params.entryBits.W))
val used = RegInit(0.U(params.numEntries.W))
val next = Mem(params.numEntries, UInt(params.entryBits.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val dataMems = Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) }
val dataIsPresent = RegInit(0.U(params.numEntries.W))
val beats = Mem(params.numEntries, UInt(params.beatBits.W))
// The 'data' SRAM should be single-ported (read-or-write), since dual-ported SRAMs are significantly slower.
val dataMemReadEnable = WireDefault(false.B)
val dataMemWriteEnable = WireDefault(false.B)
assert(!(dataMemReadEnable && dataMemWriteEnable))
// 'freeOH' has a single bit set, which is the least-significant bit that is cleared in 'used'. So, it's the
// lowest-index entry in the 'data' RAM which is free.
val freeOH = Wire(UInt(params.numEntries.W))
val freeIndex = OHToUInt(freeOH)
freeOH := ~(leftOR(~used) << 1) & ~used
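// Worked example (added for clarity, not in the original source): with used = b0110, ~used = b1001 and
// leftOR(~used) = b1111; shifting left by one and inverting keeps only the bits at or below the lowest
// free index, so ANDing with ~used leaves freeOH = b0001 (entry 0). With used = b0111 the same
// expression yields b1000 (entry 3).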
ioReservedIndex := freeIndex
val validSet = WireDefault(0.U(params.numLists.W))
val validClr = WireDefault(0.U(params.numLists.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
val dataIsPresentSet = WireDefault(0.U(params.numEntries.W))
val dataIsPresentClr = WireDefault(0.U(params.numEntries.W))
valid := (valid & ~validClr) | validSet
used := (used & ~usedClr) | usedSet
dataIsPresent := (dataIsPresent & ~dataIsPresentClr) | dataIsPresentSet
/* Reservation logic signals */
val reserveTail = Wire(UInt(params.entryBits.W))
val reserveIsValid = Wire(Bool())
/* Response logic signals */
val responseIndex = Wire(UInt(params.entryBits.W))
val responseListIndex = Wire(UInt(params.listBits.W))
val responseHead = Wire(UInt(params.entryBits.W))
val responseTail = Wire(UInt(params.entryBits.W))
val nextResponseHead = Wire(UInt(params.entryBits.W))
val nextDataIsPresent = Wire(Bool())
val isResponseInOrder = Wire(Bool())
val isEndOfList = Wire(Bool())
val isLastBeat = Wire(Bool())
val isLastResponseBeat = Wire(Bool())
val isLastUnwindBeat = Wire(Bool())
/* Reservation logic */
reserveTail := tail.read(ioReserve.bits)
reserveIsValid := valid(ioReserve.bits)
ioReserve.ready := !used.andR
// When we want to append-to and destroy the same linked list on the same cycle, we need to take special care that we
// actually start a new list, rather than appending to a list that's about to disappear.
val reserveResponseSameList = ioReserve.bits === responseListIndex
val appendToAndDestroyList =
ioReserve.fire && ioDataOut.fire && reserveResponseSameList && isEndOfList && isLastBeat
when(ioReserve.fire) {
validSet := UIntToOH(ioReserve.bits, params.numLists)
usedSet := freeOH
when(reserveIsValid && !appendToAndDestroyList) {
next.write(reserveTail, freeIndex)
}.otherwise {
head.write(ioReserve.bits, freeIndex)
}
tail.write(ioReserve.bits, freeIndex)
map.write(freeIndex, ioReserve.bits)
}
/* Response logic */
// The majority of the response logic (reading from and writing to the various RAMs) is common between the
// response-from-IO case (ioResponse.fire) and the response-from-unwind case (unwindDataIsValid).
// The read from the 'next' RAM should be performed at the address given by 'responseHead'. However, we only use the
// 'nextResponseHead' signal when 'isResponseInOrder' is asserted (both in the response-from-IO and
// response-from-unwind cases), which implies that 'responseHead' equals 'responseIndex'. 'responseHead' comes after
// two back-to-back RAM reads, so indexing into the 'next' RAM with 'responseIndex' is much quicker.
responseHead := head.read(responseListIndex)
responseTail := tail.read(responseListIndex)
nextResponseHead := next.read(responseIndex)
nextDataIsPresent := dataIsPresent(nextResponseHead)
// Note that when 'isEndOfList' is asserted, 'nextResponseHead' (and therefore 'nextDataIsPresent') is invalid, since
// there isn't a next element in the linked list.
isResponseInOrder := responseHead === responseIndex
isEndOfList := responseHead === responseTail
isLastResponseBeat := ioResponse.bits.count === ioResponse.bits.numBeats1
// When a response's last beat is sent to the output channel, mark it as completed. This can happen in two
// situations:
// 1. We receive an in-order response, which travels straight from 'ioResponse' to 'ioDataOut'. The 'data' SRAM
// reservation was never needed.
// 2. An entry is read out of the 'data' SRAM (within the unwind FSM).
when(ioDataOut.fire && isLastBeat) {
// Mark the reservation as no-longer-used.
usedClr := UIntToOH(responseIndex, params.numEntries)
// If the response is in-order, then we're popping an element from this linked list.
when(isEndOfList) {
// Once we pop the last element from a linked list, mark it as no-longer-present.
validClr := UIntToOH(responseListIndex, params.numLists)
}.otherwise {
// Move the linked list's head pointer to the new head pointer.
head.write(responseListIndex, nextResponseHead)
}
}
// If we get an out-of-order response, then stash it in the 'data' SRAM for later unwinding.
when(ioResponse.fire && !isResponseInOrder) {
dataMemWriteEnable := true.B
when(isLastResponseBeat) {
dataIsPresentSet := UIntToOH(ioResponse.bits.index, params.numEntries)
beats.write(ioResponse.bits.index, ioResponse.bits.numBeats1)
}
}
// Use the 'ioResponse.bits.count' index (AKA the beat number) to select which 'data' SRAM to write to.
val responseCountOH = UIntToOH(ioResponse.bits.count, params.numBeats)
(responseCountOH.asBools zip dataMems) foreach { case (select, seqMem) =>
when(select && dataMemWriteEnable) {
seqMem.write(ioResponse.bits.index, ioResponse.bits.data)
}
}
/* Response unwind logic */
// Unwind FSM state definitions
val sIdle :: sUnwinding :: Nil = Enum(2)
val unwindState = RegInit(sIdle)
val busyUnwinding = unwindState === sUnwinding
val startUnwind = Wire(Bool())
val stopUnwind = Wire(Bool())
when(startUnwind) {
unwindState := sUnwinding
}.elsewhen(stopUnwind) {
unwindState := sIdle
}
assert(!(startUnwind && stopUnwind))
// Start the unwind FSM when there is an old out-of-order response stored in the 'data' SRAM that is now about to
// become the next in-order response. As noted previously, when 'isEndOfList' is asserted, 'nextDataIsPresent' is
// invalid.
//
// Note that since an in-order response from 'ioResponse' to 'ioDataOut' starts the unwind FSM, we don't have to
// worry about overwriting the 'data' SRAM's output when we start the unwind FSM.
startUnwind := ioResponse.fire && isResponseInOrder && isLastResponseBeat && !isEndOfList && nextDataIsPresent
// Stop the unwind FSM when the output channel consumes the final beat of an element from the unwind FSM, and one of
// two things happens:
// 1. We're still waiting for the next in-order response for this list (!nextDataIsPresent)
// 2. There are no more outstanding responses in this list (isEndOfList)
//
// Including 'busyUnwinding' ensures this is a single-cycle pulse, and it never fires while in-order transactions are
// passing from 'ioResponse' to 'ioDataOut'.
stopUnwind := busyUnwinding && ioDataOut.fire && isLastUnwindBeat && (!nextDataIsPresent || isEndOfList)
val isUnwindBurstOver = Wire(Bool())
val startNewBurst = startUnwind || (isUnwindBurstOver && dataMemReadEnable)
// Track the number of beats left to unwind for each list entry. At the start of a new burst, we flop the number of
// beats in this burst (minus 1) into 'unwindBeats1', and we reset the 'beatCounter' counter. With each beat, we
// increment 'beatCounter' until it reaches 'unwindBeats1'.
val unwindBeats1 = Reg(UInt(params.beatBits.W))
val nextBeatCounter = Wire(UInt(params.beatBits.W))
val beatCounter = RegNext(nextBeatCounter)
isUnwindBurstOver := beatCounter === unwindBeats1
when(startNewBurst) {
unwindBeats1 := beats.read(nextResponseHead)
nextBeatCounter := 0.U
}.elsewhen(dataMemReadEnable) {
nextBeatCounter := beatCounter + 1.U
}.otherwise {
nextBeatCounter := beatCounter
}
// When unwinding, feed the next linked-list head pointer (read out of the 'next' RAM) back so we can unwind the next
// entry in this linked list. Only update the pointer when we're actually moving to the next 'data' SRAM entry (which
// happens at the start of reading a new stored burst).
val unwindResponseIndex = RegEnable(nextResponseHead, startNewBurst)
responseIndex := Mux(busyUnwinding, unwindResponseIndex, ioResponse.bits.index)
// Hold 'nextResponseHead' static while we're in the middle of unwinding a multi-beat burst entry. We don't want the
// SRAM read address to shift while reading beats from a burst. Note that this is identical to 'nextResponseHead
// holdUnless startNewBurst', but 'unwindResponseIndex' already implements the 'RegEnable' signal in 'holdUnless'.
val unwindReadAddress = Mux(startNewBurst, nextResponseHead, unwindResponseIndex)
// The 'data' SRAM's output is valid if we read from the SRAM on the previous cycle. The SRAM's output stays valid
// until it is consumed by the output channel (and if we don't read from the SRAM again on that same cycle).
val unwindDataIsValid = RegInit(false.B)
when(dataMemReadEnable) {
unwindDataIsValid := true.B
}.elsewhen(ioDataOut.fire) {
unwindDataIsValid := false.B
}
isLastUnwindBeat := isUnwindBurstOver && unwindDataIsValid
// Indicates if this is the last beat for both 'ioResponse'-to-'ioDataOut' and unwind-to-'ioDataOut' beats.
isLastBeat := Mux(busyUnwinding, isLastUnwindBeat, isLastResponseBeat)
// Select which SRAM to read from based on the beat counter.
val dataOutputVec = Wire(Vec(params.numBeats, gen))
val nextBeatCounterOH = UIntToOH(nextBeatCounter, params.numBeats)
(nextBeatCounterOH.asBools zip dataMems).zipWithIndex foreach { case ((select, seqMem), i) =>
dataOutputVec(i) := seqMem.read(unwindReadAddress, select && dataMemReadEnable)
}
// Select the current 'data' SRAM output beat, and save the output in a register in case we're being back-pressured
// by 'ioDataOut'. This implements the functionality of 'readAndHold', but only on the single SRAM we're reading
// from.
val dataOutput = dataOutputVec(beatCounter) holdUnless RegNext(dataMemReadEnable)
// Mark 'data' burst entries as no-longer-present as they get read out of the SRAM.
when(dataMemReadEnable) {
dataIsPresentClr := UIntToOH(unwindReadAddress, params.numEntries)
}
// As noted above, when starting the unwind FSM, we know the 'data' SRAM's output isn't valid, so it's safe to issue
// a read command. Otherwise, only issue an SRAM read when the next 'unwindState' is 'sUnwinding', and if we know
// we're not going to overwrite the SRAM's current output (the SRAM output is already valid, and it's not going to be
// consumed by the output channel).
val dontReadFromDataMem = unwindDataIsValid && !ioDataOut.ready
dataMemReadEnable := startUnwind || (busyUnwinding && !stopUnwind && !dontReadFromDataMem)
// While unwinding, prevent new reservations from overwriting the current 'map' entry that we're using. We need
// 'responseListIndex' to be coherent for the entire unwind process.
val rawResponseListIndex = map.read(responseIndex)
val unwindResponseListIndex = RegEnable(rawResponseListIndex, startNewBurst)
responseListIndex := Mux(busyUnwinding, unwindResponseListIndex, rawResponseListIndex)
// Accept responses either when they can be passed through to the output channel, or if they're out-of-order and are
// just going to be stashed in the 'data' SRAM. Never accept a response payload when we're busy unwinding, since that
// could result in reading from and writing to the 'data' SRAM in the same cycle, and we want that SRAM to be
// single-ported.
ioResponse.ready := (ioDataOut.ready || !isResponseInOrder) && !busyUnwinding
// Either pass an in-order response to the output channel, or data read from the unwind FSM.
ioDataOut.valid := Mux(busyUnwinding, unwindDataIsValid, ioResponse.valid && isResponseInOrder)
ioDataOut.bits.listIndex := responseListIndex
ioDataOut.bits.payload := Mux(busyUnwinding, dataOutput, ioResponse.bits.data)
// It's an error to get a response that isn't associated with a valid linked list.
when(ioResponse.fire || unwindDataIsValid) {
assert(
valid(responseListIndex),
"No linked list exists at index %d, mapped from %d",
responseListIndex,
responseIndex
)
}
when(busyUnwinding && dataMemReadEnable) {
assert(isResponseInOrder, "Unwind FSM must read entries from SRAM in order")
}
}
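// Illustrative driver sketch (assumptions, not from the original file): the intended handshake order is
// reserve-then-respond. A user of this buffer might wire it roughly as follows, where 'listId',
// 'requestFire' and 'downstreamReady' are placeholder signals:
//
//   buf.ioReserve.valid := requestFire       // reserve an entry in linked list 'listId'
//   buf.ioReserve.bits  := listId
//   val tag = buf.ioReservedIndex            // granted entry index, valid when ioReserve fires
//   buf.ioResponse.bits.index := tag         // responses may later return out-of-order
//   buf.ioDataOut.ready := downstreamReady   // data is drained in per-list order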
/** Specialized version of [[ReservableListBuffer]] for the case of numEntries == 1.
*
* Much of the complex logic in [[ReservableListBuffer]] can disappear in this case. For instance, we don't have to
* reorder any responses, or store any linked lists.
*/
class PassthroughListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
require(params.numEntries == 1, s"PassthroughListBuffer is only valid when 'numEntries' (${params.numEntries}) is 1")
val used = RegInit(0.U(params.numEntries.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
used := (used & ~usedClr) | usedSet
ioReserve.ready := used === 0.U
// Store which list index was reserved; we need to return this value when we get a response.
when(ioReserve.fire) {
usedSet := 1.U
map.write(0.U, ioReserve.bits)
}
// There's only one valid linked list entry, which is at index 0.
ioReservedIndex := 0.U
val isLastResponseBeat = ioResponse.bits.count === ioResponse.bits.numBeats1
// Mark the linked list as empty when we get the last beat in a response.
// Note that 'ioResponse.fire === ioDataOut.fire'.
when(ioResponse.fire && isLastResponseBeat) {
usedClr := 1.U
}
// Always pass the response data straight through, since we never need to reorder the response data.
ioDataOut.bits.listIndex := map.read(0.U)
ioDataOut.bits.payload := ioResponse.bits.data
ioDataOut.valid := ioResponse.valid
ioResponse.ready := ioDataOut.ready
}
| module dataMems_481( // @[UnsafeAXI4ToTL.scala:365:62]
input [4:0] R0_addr,
input R0_en,
input R0_clk,
output [66:0] R0_data,
input [4:0] W0_addr,
input W0_en,
input W0_clk,
input [66:0] W0_data
);
dataMems_0_ext dataMems_0_ext ( // @[UnsafeAXI4ToTL.scala:365:62]
.R0_addr (R0_addr),
.R0_en (R0_en),
.R0_clk (R0_clk),
.R0_data (R0_data),
.W0_addr (W0_addr),
.W0_en (W0_en),
.W0_clk (W0_clk),
.W0_data (W0_data)
); // @[UnsafeAXI4ToTL.scala:365:62]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Crossing.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.interrupts
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.util.{SynchronizerShiftReg, AsyncResetReg}
@deprecated("IntXing does not ensure interrupt source is glitch free. Use IntSyncSource and IntSyncSink", "rocket-chip 1.2")
class IntXing(sync: Int = 3)(implicit p: Parameters) extends LazyModule
{
val intnode = IntAdapterNode()
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(intnode.in zip intnode.out) foreach { case ((in, _), (out, _)) =>
out := SynchronizerShiftReg(in, sync)
}
}
}
object IntSyncCrossingSource
{
def apply(alreadyRegistered: Boolean = false)(implicit p: Parameters) =
{
val intsource = LazyModule(new IntSyncCrossingSource(alreadyRegistered))
intsource.node
}
}
class IntSyncCrossingSource(alreadyRegistered: Boolean = false)(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSourceNode(alreadyRegistered)
lazy val module = if (alreadyRegistered) (new ImplRegistered) else (new Impl)
class Impl extends LazyModuleImp(this) {
def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0)
override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.sync := AsyncResetReg(Cat(in.reverse)).asBools
}
}
class ImplRegistered extends LazyRawModuleImp(this) {
def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0)
override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}_Registered"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.sync := in
}
}
}
object IntSyncCrossingSink
{
@deprecated("IntSyncCrossingSink which used the `sync` parameter to determine crossing type is deprecated. Use IntSyncAsyncCrossingSink, IntSyncRationalCrossingSink, or IntSyncSyncCrossingSink instead for > 1, 1, and 0 sync values respectively", "rocket-chip 1.2")
def apply(sync: Int = 3)(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync))
intsink.node
}
}
class IntSyncAsyncCrossingSink(sync: Int = 3)(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSinkNode(sync)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
override def desiredName = s"IntSyncAsyncCrossingSink_n${node.out.size}x${node.out.head._1.size}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out := SynchronizerShiftReg(in.sync, sync)
}
}
}
object IntSyncAsyncCrossingSink
{
def apply(sync: Int = 3)(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync))
intsink.node
}
}
class IntSyncSyncCrossingSink()(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSinkNode(0)
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
def outSize = node.out.headOption.map(_._1.size).getOrElse(0)
override def desiredName = s"IntSyncSyncCrossingSink_n${node.out.size}x${outSize}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out := in.sync
}
}
}
object IntSyncSyncCrossingSink
{
def apply()(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncSyncCrossingSink())
intsink.node
}
}
class IntSyncRationalCrossingSink()(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSinkNode(1)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def outSize = node.out.headOption.map(_._1.size).getOrElse(0)
override def desiredName = s"IntSyncRationalCrossingSink_n${node.out.size}x${outSize}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out := RegNext(in.sync)
}
}
}
object IntSyncRationalCrossingSink
{
def apply()(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncRationalCrossingSink())
intsink.node
}
}
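// Hypothetical usage sketch (not part of the original file): a registered interrupt crossing between two
// clock domains is typically built by pairing a source and a sink node, e.g.
//
//   val source = LazyModule(new IntSyncCrossingSource(alreadyRegistered = false))
//   val sink   = LazyModule(new IntSyncSyncCrossingSink())
//   sink.node := source.node := deviceIntNode   // 'deviceIntNode' is an assumed interrupt source node
//
// The source flops the raw interrupts through an async-reset register; the choice of sink (async,
// rational, or sync) then determines how many synchronizer stages are applied in the destination domain.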
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
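// Minimal sketch (assumptions, not from the original file): a user-defined LazyModule typically pairs a
// diplomatic node with a lazily constructed implementation, e.g.
//
//   class MyPassthrough(implicit p: Parameters) extends LazyModule {
//     val node = IntAdapterNode()                    // any diplomatic node type
//     lazy val module = new LazyModuleImp(this) {
//       (node.in zip node.out).foreach { case ((in, _), (out, _)) => out := in }
//     }
//   }
//
// The 'lazy val module' matters: the implementation must not be elaborated while LazyModules are still
// being built, which is what the require(LazyModule.scope.isEmpty, ...) guard above enforces.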
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
* nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all node are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
* Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
* Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
* the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
* Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
* It should extend from [[chisel3.Data]], which represents the real hardware.
* @tparam DO
* Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
* describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
* Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
* Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
* interface. It should extend from [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
* - line `─`: source is process by a function and generate pass to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]]s are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
/** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
* connections which need to be resolved to determine how many actual edges they correspond to. We also
* need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
* edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the Node's [[iPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
// Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need to access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: Unconnected forwarded diplomatic signals are assigned DontCare for compatibility reasons.
// In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: Unconnected forwarded diplomatic signals are assigned DontCare for compatibility reasons.
// In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
File AsyncResetReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
/** This black-boxes an Async Reset
* (or Set)
* Register.
*
* Because Chisel doesn't support
* parameterized black boxes,
* we unfortunately have to
* instantiate a number of these.
*
* We also have to hard-code the set/
* reset behavior.
*
* Do not confuse an asynchronous
* reset signal with an asynchronously
* reset reg. You should still
* properly synchronize your reset
* deassertion.
*
* @param d Data input
* @param q Data Output
* @param clk Clock Input
* @param rst Reset Input
* @param en Write Enable Input
*
*/
class AsyncResetReg(resetValue: Int = 0) extends RawModule {
val io = IO(new Bundle {
val d = Input(Bool())
val q = Output(Bool())
val en = Input(Bool())
val clk = Input(Clock())
val rst = Input(Reset())
})
val reg = withClockAndReset(io.clk, io.rst.asAsyncReset)(RegInit(resetValue.U(1.W)))
when (io.en) {
reg := io.d
}
io.q := reg
}
class SimpleRegIO(val w: Int) extends Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
}
class AsyncResetRegVec(val w: Int, val init: BigInt) extends Module {
override def desiredName = s"AsyncResetRegVec_w${w}_i${init}"
val io = IO(new SimpleRegIO(w))
val reg = withReset(reset.asAsyncReset)(RegInit(init.U(w.W)))
when (io.en) {
reg := io.d
}
io.q := reg
}
object AsyncResetReg {
// Create Single Registers
def apply(d: Bool, clk: Clock, rst: Bool, init: Boolean, name: Option[String]): Bool = {
val reg = Module(new AsyncResetReg(if (init) 1 else 0))
reg.io.d := d
reg.io.clk := clk
reg.io.rst := rst
reg.io.en := true.B
name.foreach(reg.suggestName(_))
reg.io.q
}
def apply(d: Bool, clk: Clock, rst: Bool): Bool = apply(d, clk, rst, false, None)
def apply(d: Bool, clk: Clock, rst: Bool, name: String): Bool = apply(d, clk, rst, false, Some(name))
// Create Vectors of Registers
def apply(updateData: UInt, resetData: BigInt, enable: Bool, name: Option[String] = None): UInt = {
val w = updateData.getWidth max resetData.bitLength
val reg = Module(new AsyncResetRegVec(w, resetData))
name.foreach(reg.suggestName(_))
reg.io.d := updateData
reg.io.en := enable
reg.io.q
}
def apply(updateData: UInt, resetData: BigInt, enable: Bool, name: String): UInt = apply(updateData,
resetData, enable, Some(name))
def apply(updateData: UInt, resetData: BigInt): UInt = apply(updateData, resetData, enable = true.B)
def apply(updateData: UInt, resetData: BigInt, name: String): UInt = apply(updateData, resetData, enable = true.B, Some(name))
def apply(updateData: UInt, enable: Bool): UInt = apply(updateData, resetData=BigInt(0), enable)
def apply(updateData: UInt, enable: Bool, name: String): UInt = apply(updateData, resetData = BigInt(0), enable, Some(name))
def apply(updateData: UInt): UInt = apply(updateData, resetData = BigInt(0), enable = true.B)
def apply(updateData: UInt, name:String): UInt = apply(updateData, resetData = BigInt(0), enable = true.B, Some(name))
}
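// Hedged usage sketch (added for illustration; `StickyFlag` and its IO are
// hypothetical, not part of the upstream file). A typical use of AsyncResetReg
// is state that must come out of an asynchronous reset with a known value:
class StickyFlag extends Module {
  val io = IO(new Bundle {
    val set  = Input(Bool())
    val flag = Output(UInt(1.W))
  })
  // 1-bit asynchronously-reset register: resets to 0, captures 1 when io.set is high
  io.flag := AsyncResetReg(1.U(1.W), BigInt(0), io.set)
}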
| module IntSyncCrossingSource_n1x2_8( // @[Crossing.scala:41:9]
input clock, // @[Crossing.scala:41:9]
input reset, // @[Crossing.scala:41:9]
input auto_in_0, // @[LazyModuleImp.scala:107:25]
input auto_in_1, // @[LazyModuleImp.scala:107:25]
output auto_out_sync_0, // @[LazyModuleImp.scala:107:25]
output auto_out_sync_1 // @[LazyModuleImp.scala:107:25]
);
wire [1:0] _reg_io_q; // @[AsyncResetReg.scala:86:21]
wire auto_in_0_0 = auto_in_0; // @[Crossing.scala:41:9]
wire auto_in_1_0 = auto_in_1; // @[Crossing.scala:41:9]
wire nodeIn_0 = auto_in_0_0; // @[Crossing.scala:41:9]
wire nodeIn_1 = auto_in_1_0; // @[Crossing.scala:41:9]
wire nodeOut_sync_0; // @[MixedNode.scala:542:17]
wire nodeOut_sync_1; // @[MixedNode.scala:542:17]
wire auto_out_sync_0_0; // @[Crossing.scala:41:9]
wire auto_out_sync_1_0; // @[Crossing.scala:41:9]
assign auto_out_sync_0_0 = nodeOut_sync_0; // @[Crossing.scala:41:9]
assign auto_out_sync_1_0 = nodeOut_sync_1; // @[Crossing.scala:41:9]
assign nodeOut_sync_0 = _reg_io_q[0]; // @[AsyncResetReg.scala:86:21]
assign nodeOut_sync_1 = _reg_io_q[1]; // @[AsyncResetReg.scala:86:21]
AsyncResetRegVec_w2_i0_8 reg_0 ( // @[AsyncResetReg.scala:86:21]
.clock (clock),
.reset (reset),
.io_d ({nodeIn_1, nodeIn_0}), // @[Crossing.scala:45:36]
.io_q (_reg_io_q)
); // @[AsyncResetReg.scala:86:21]
assign auto_out_sync_0 = auto_out_sync_0_0; // @[Crossing.scala:41:9]
assign auto_out_sync_1 = auto_out_sync_1_0; // @[Crossing.scala:41:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ClockDomain.scala:
package freechips.rocketchip.prci
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
abstract class Domain(implicit p: Parameters) extends LazyModule with HasDomainCrossing
{
def clockBundle: ClockBundle
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
childClock := clockBundle.clock
childReset := clockBundle.reset
override def provideImplicitClockToLazyChildren = true
// these are just for backwards compatibility with external devices
// that were manually wiring themselves to the domain's clock/reset input:
val clock = IO(Output(chiselTypeOf(clockBundle.clock)))
val reset = IO(Output(chiselTypeOf(clockBundle.reset)))
clock := clockBundle.clock
reset := clockBundle.reset
}
}
abstract class ClockDomain(implicit p: Parameters) extends Domain with HasClockDomainCrossing
class ClockSinkDomain(val clockSinkParams: ClockSinkParameters)(implicit p: Parameters) extends ClockDomain
{
def this(take: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSinkParameters(take = take, name = name))
val clockNode = ClockSinkNode(Seq(clockSinkParams))
def clockBundle = clockNode.in.head._1
override lazy val desiredName = (clockSinkParams.name.toSeq :+ "ClockSinkDomain").mkString
}
class ClockSourceDomain(val clockSourceParams: ClockSourceParameters)(implicit p: Parameters) extends ClockDomain
{
def this(give: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSourceParameters(give = give, name = name))
val clockNode = ClockSourceNode(Seq(clockSourceParams))
def clockBundle = clockNode.out.head._1
override lazy val desiredName = (clockSourceParams.name.toSeq :+ "ClockSourceDomain").mkString
}
abstract class ResetDomain(implicit p: Parameters) extends Domain with HasResetDomainCrossing
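// Hedged usage sketch (added; `periphClockNode` and `MyDevice` are hypothetical,
// not part of the upstream file). A ClockSinkDomain is typically used to place a
// child LazyModule in its own clock domain:
//   val deviceDomain = LazyModule(new ClockSinkDomain(name = Some("myDevice")))
//   deviceDomain.clockNode := periphClockNode               // bind the domain's clock sink
//   val device = deviceDomain { LazyModule(new MyDevice) }  // elaborate the child inside the domain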
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** instantiate this [[LazyModule]], return the [[AutoBundle]] and any unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
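// Hedged sketch (added for illustration; `ExampleShell` is hypothetical). Because a
// LazyRawModuleImp has no implicit clock or reset, childClock/childReset must be
// driven explicitly, as the Impl in ClockDomain.scala does:
//   class ExampleShellImp(outer: ExampleShell) extends LazyRawModuleImp(outer) {
//     val clock_in = IO(Input(Clock()))
//     val reset_in = IO(Input(Reset()))
//     childClock := clock_in
//     childReset := reset_in
//     override def provideImplicitClockToLazyChildren = true
//   }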
File NoC.scala:
package constellation.noc
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp, BundleBridgeSink, InModuleBody}
import freechips.rocketchip.util.ElaborationArtefacts
import freechips.rocketchip.prci._
import constellation.router._
import constellation.channel._
import constellation.routing.{RoutingRelation, ChannelRoutingInfo}
import constellation.topology.{PhysicalTopology, UnidirectionalLine}
class NoCTerminalIO(
val ingressParams: Seq[IngressChannelParams],
val egressParams: Seq[EgressChannelParams])(implicit val p: Parameters) extends Bundle {
val ingress = MixedVec(ingressParams.map { u => Flipped(new IngressChannel(u)) })
val egress = MixedVec(egressParams.map { u => new EgressChannel(u) })
}
class NoC(nocParams: NoCParams)(implicit p: Parameters) extends LazyModule {
override def shouldBeInlined = nocParams.inlineNoC
val internalParams = InternalNoCParams(nocParams)
val allChannelParams = internalParams.channelParams
val allIngressParams = internalParams.ingressParams
val allEgressParams = internalParams.egressParams
val allRouterParams = internalParams.routerParams
val iP = p.alterPartial({ case InternalNoCKey => internalParams })
val nNodes = nocParams.topology.nNodes
val nocName = nocParams.nocName
val skipValidationChecks = nocParams.skipValidationChecks
val clockSourceNodes = Seq.tabulate(nNodes) { i => ClockSourceNode(Seq(ClockSourceParameters())) }
val router_sink_domains = Seq.tabulate(nNodes) { i =>
val router_sink_domain = LazyModule(new ClockSinkDomain(ClockSinkParameters(
name = Some(s"${nocName}_router_$i")
)))
router_sink_domain.clockNode := clockSourceNodes(i)
router_sink_domain
}
val routers = Seq.tabulate(nNodes) { i => router_sink_domains(i) {
val inParams = allChannelParams.filter(_.destId == i).map(
_.copy(payloadBits=allRouterParams(i).user.payloadBits)
)
val outParams = allChannelParams.filter(_.srcId == i).map(
_.copy(payloadBits=allRouterParams(i).user.payloadBits)
)
val ingressParams = allIngressParams.filter(_.destId == i).map(
_.copy(payloadBits=allRouterParams(i).user.payloadBits)
)
val egressParams = allEgressParams.filter(_.srcId == i).map(
_.copy(payloadBits=allRouterParams(i).user.payloadBits)
)
val noIn = inParams.size + ingressParams.size == 0
val noOut = outParams.size + egressParams.size == 0
if (noIn || noOut) {
println(s"Constellation WARNING: $nocName router $i seems to be unused, it will not be generated")
None
} else {
Some(LazyModule(new Router(
routerParams = allRouterParams(i),
preDiplomaticInParams = inParams,
preDiplomaticIngressParams = ingressParams,
outDests = outParams.map(_.destId),
egressIds = egressParams.map(_.egressId)
)(iP)))
}
}}.flatten
val ingressNodes = allIngressParams.map { u => IngressChannelSourceNode(u.destId) }
val egressNodes = allEgressParams.map { u => EgressChannelDestNode(u) }
// Generate channels between routers diplomatically
Seq.tabulate(nNodes, nNodes) { case (i, j) => if (i != j) {
val routerI = routers.find(_.nodeId == i)
val routerJ = routers.find(_.nodeId == j)
if (routerI.isDefined && routerJ.isDefined) {
val sourceNodes: Seq[ChannelSourceNode] = routerI.get.sourceNodes.filter(_.destId == j)
val destNodes: Seq[ChannelDestNode] = routerJ.get.destNodes.filter(_.destParams.srcId == i)
require (sourceNodes.size == destNodes.size)
(sourceNodes zip destNodes).foreach { case (src, dst) =>
val channelParam = allChannelParams.find(c => c.srcId == i && c.destId == j).get
router_sink_domains(j) {
implicit val p: Parameters = iP
(dst
:= ChannelWidthWidget(routerJ.get.payloadBits, routerI.get.payloadBits)
:= channelParam.channelGen(p)(src)
)
}
}
}
}}
// Generate terminal channels diplomatically
routers.foreach { dst => router_sink_domains(dst.nodeId) {
implicit val p: Parameters = iP
dst.ingressNodes.foreach(n => {
val ingressId = n.destParams.ingressId
require(dst.payloadBits <= allIngressParams(ingressId).payloadBits)
(n
:= IngressWidthWidget(dst.payloadBits, allIngressParams(ingressId).payloadBits)
:= ingressNodes(ingressId)
)
})
dst.egressNodes.foreach(n => {
val egressId = n.egressId
require(dst.payloadBits <= allEgressParams(egressId).payloadBits)
(egressNodes(egressId)
:= EgressWidthWidget(allEgressParams(egressId).payloadBits, dst.payloadBits)
:= n
)
})
}}
val debugNodes = routers.map { r =>
val sink = BundleBridgeSink[DebugBundle]()
sink := r.debugNode
sink
}
val ctrlNodes = if (nocParams.hasCtrl) {
(0 until nNodes).map { i =>
routers.find(_.nodeId == i).map { r =>
val sink = BundleBridgeSink[RouterCtrlBundle]()
sink := r.ctrlNode.get
sink
}
}
} else {
Nil
}
println(s"Constellation: $nocName Finished parameter validation")
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
println(s"Constellation: $nocName Starting NoC RTL generation")
val io = IO(new NoCTerminalIO(allIngressParams, allEgressParams)(iP) {
val router_clocks = Vec(nNodes, Input(new ClockBundle(ClockBundleParameters())))
val router_ctrl = if (nocParams.hasCtrl) Vec(nNodes, new RouterCtrlBundle) else Nil
})
(io.ingress zip ingressNodes.map(_.out(0)._1)).foreach { case (l,r) => r <> l }
(io.egress zip egressNodes .map(_.in (0)._1)).foreach { case (l,r) => l <> r }
(io.router_clocks zip clockSourceNodes.map(_.out(0)._1)).foreach { case (l,r) => l <> r }
if (nocParams.hasCtrl) {
ctrlNodes.zipWithIndex.map { case (c,i) =>
if (c.isDefined) {
io.router_ctrl(i) <> c.get.in(0)._1
} else {
io.router_ctrl(i) <> DontCare
}
}
}
// TODO: These assume a single clock-domain across the entire noc
val debug_va_stall_ctr = RegInit(0.U(64.W))
val debug_sa_stall_ctr = RegInit(0.U(64.W))
val debug_any_stall_ctr = debug_va_stall_ctr + debug_sa_stall_ctr
debug_va_stall_ctr := debug_va_stall_ctr + debugNodes.map(_.in(0)._1.va_stall.reduce(_+_)).reduce(_+_)
debug_sa_stall_ctr := debug_sa_stall_ctr + debugNodes.map(_.in(0)._1.sa_stall.reduce(_+_)).reduce(_+_)
dontTouch(debug_va_stall_ctr)
dontTouch(debug_sa_stall_ctr)
dontTouch(debug_any_stall_ctr)
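// Dump topology debugging artifacts for this NoC: a GraphML rendering, a plain
// adjacency list, node/terminal XY coordinates for plotting, and per-edge
// properties (marking unused links), each artifact name prefixed with the NoC's name.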
def prepend(s: String) = Seq(nocName, s).mkString(".")
ElaborationArtefacts.add(prepend("noc.graphml"), graphML)
val adjList = routers.map { r =>
val outs = r.outParams.map(o => s"${o.destId}").mkString(" ")
val egresses = r.egressParams.map(e => s"e${e.egressId}").mkString(" ")
val ingresses = r.ingressParams.map(i => s"i${i.ingressId} ${r.nodeId}")
(Seq(s"${r.nodeId} $outs $egresses") ++ ingresses).mkString("\n")
}.mkString("\n")
ElaborationArtefacts.add(prepend("noc.adjlist"), adjList)
val xys = routers.map(r => {
val n = r.nodeId
val ids = (Seq(r.nodeId.toString)
++ r.egressParams.map(e => s"e${e.egressId}")
++ r.ingressParams.map(i => s"i${i.ingressId}")
)
val plotter = nocParams.topology.plotter
val coords = (Seq(plotter.node(r.nodeId))
++ Seq.tabulate(r.egressParams.size ) { i => plotter. egress(i, r. egressParams.size, r.nodeId) }
++ Seq.tabulate(r.ingressParams.size) { i => plotter.ingress(i, r.ingressParams.size, r.nodeId) }
)
(ids zip coords).map { case (i, (x, y)) => s"$i $x $y" }.mkString("\n")
}).mkString("\n")
ElaborationArtefacts.add(prepend("noc.xy"), xys)
val edgeProps = routers.map { r =>
val outs = r.outParams.map { o =>
(Seq(s"${r.nodeId} ${o.destId}") ++ (if (o.possibleFlows.size == 0) Some("unused") else None))
.mkString(" ")
}
val egresses = r.egressParams.map { e =>
(Seq(s"${r.nodeId} e${e.egressId}") ++ (if (e.possibleFlows.size == 0) Some("unused") else None))
.mkString(" ")
}
val ingresses = r.ingressParams.map { i =>
(Seq(s"i${i.ingressId} ${r.nodeId}") ++ (if (i.possibleFlows.size == 0) Some("unused") else None))
.mkString(" ")
}
(outs ++ egresses ++ ingresses).mkString("\n")
}.mkString("\n")
ElaborationArtefacts.add(prepend("noc.edgeprops"), edgeProps)
println(s"Constellation: $nocName Finished NoC RTL generation")
}
}
| module TLNoC_router_3ClockSinkDomain( // @[ClockDomain.scala:14:9]
output [2:0] auto_routers_debug_out_va_stall_0, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_routers_debug_out_va_stall_2, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_routers_debug_out_sa_stall_0, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_routers_debug_out_sa_stall_2, // @[LazyModuleImp.scala:107:25]
input auto_routers_egress_nodes_out_2_flit_ready, // @[LazyModuleImp.scala:107:25]
output auto_routers_egress_nodes_out_2_flit_valid, // @[LazyModuleImp.scala:107:25]
output auto_routers_egress_nodes_out_2_flit_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_routers_egress_nodes_out_2_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
input auto_routers_egress_nodes_out_1_flit_ready, // @[LazyModuleImp.scala:107:25]
output auto_routers_egress_nodes_out_1_flit_valid, // @[LazyModuleImp.scala:107:25]
output auto_routers_egress_nodes_out_1_flit_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_routers_egress_nodes_out_1_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
input auto_routers_egress_nodes_out_0_flit_ready, // @[LazyModuleImp.scala:107:25]
output auto_routers_egress_nodes_out_0_flit_valid, // @[LazyModuleImp.scala:107:25]
output auto_routers_egress_nodes_out_0_flit_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_routers_egress_nodes_out_0_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
output [72:0] auto_routers_egress_nodes_out_0_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
output auto_routers_ingress_nodes_in_1_flit_ready, // @[LazyModuleImp.scala:107:25]
input auto_routers_ingress_nodes_in_1_flit_valid, // @[LazyModuleImp.scala:107:25]
input auto_routers_ingress_nodes_in_1_flit_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_routers_ingress_nodes_in_1_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
input [72:0] auto_routers_ingress_nodes_in_1_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_routers_ingress_nodes_in_1_flit_bits_egress_id, // @[LazyModuleImp.scala:107:25]
input auto_routers_ingress_nodes_in_0_flit_valid, // @[LazyModuleImp.scala:107:25]
output auto_routers_source_nodes_out_flit_0_valid, // @[LazyModuleImp.scala:107:25]
output auto_routers_source_nodes_out_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_routers_source_nodes_out_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
output [72:0] auto_routers_source_nodes_out_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_routers_source_nodes_out_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_routers_source_nodes_out_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_routers_source_nodes_out_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_routers_source_nodes_out_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_routers_source_nodes_out_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_routers_source_nodes_out_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_routers_source_nodes_out_credit_return, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_routers_source_nodes_out_vc_free, // @[LazyModuleImp.scala:107:25]
input auto_routers_dest_nodes_in_flit_0_valid, // @[LazyModuleImp.scala:107:25]
input auto_routers_dest_nodes_in_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_routers_dest_nodes_in_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
input [72:0] auto_routers_dest_nodes_in_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_routers_dest_nodes_in_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_routers_dest_nodes_in_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_routers_dest_nodes_in_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_routers_dest_nodes_in_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_routers_dest_nodes_in_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_routers_dest_nodes_in_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_routers_dest_nodes_in_credit_return, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_routers_dest_nodes_in_vc_free, // @[LazyModuleImp.scala:107:25]
input auto_clock_in_clock, // @[LazyModuleImp.scala:107:25]
input auto_clock_in_reset // @[LazyModuleImp.scala:107:25]
);
Router_3 routers ( // @[NoC.scala:67:22]
.clock (auto_clock_in_clock),
.reset (auto_clock_in_reset),
.auto_debug_out_va_stall_0 (auto_routers_debug_out_va_stall_0),
.auto_debug_out_va_stall_2 (auto_routers_debug_out_va_stall_2),
.auto_debug_out_sa_stall_0 (auto_routers_debug_out_sa_stall_0),
.auto_debug_out_sa_stall_2 (auto_routers_debug_out_sa_stall_2),
.auto_egress_nodes_out_2_flit_ready (auto_routers_egress_nodes_out_2_flit_ready),
.auto_egress_nodes_out_2_flit_valid (auto_routers_egress_nodes_out_2_flit_valid),
.auto_egress_nodes_out_2_flit_bits_head (auto_routers_egress_nodes_out_2_flit_bits_head),
.auto_egress_nodes_out_2_flit_bits_tail (auto_routers_egress_nodes_out_2_flit_bits_tail),
.auto_egress_nodes_out_1_flit_ready (auto_routers_egress_nodes_out_1_flit_ready),
.auto_egress_nodes_out_1_flit_valid (auto_routers_egress_nodes_out_1_flit_valid),
.auto_egress_nodes_out_1_flit_bits_head (auto_routers_egress_nodes_out_1_flit_bits_head),
.auto_egress_nodes_out_1_flit_bits_tail (auto_routers_egress_nodes_out_1_flit_bits_tail),
.auto_egress_nodes_out_0_flit_ready (auto_routers_egress_nodes_out_0_flit_ready),
.auto_egress_nodes_out_0_flit_valid (auto_routers_egress_nodes_out_0_flit_valid),
.auto_egress_nodes_out_0_flit_bits_head (auto_routers_egress_nodes_out_0_flit_bits_head),
.auto_egress_nodes_out_0_flit_bits_tail (auto_routers_egress_nodes_out_0_flit_bits_tail),
.auto_egress_nodes_out_0_flit_bits_payload (auto_routers_egress_nodes_out_0_flit_bits_payload),
.auto_ingress_nodes_in_1_flit_ready (auto_routers_ingress_nodes_in_1_flit_ready),
.auto_ingress_nodes_in_1_flit_valid (auto_routers_ingress_nodes_in_1_flit_valid),
.auto_ingress_nodes_in_1_flit_bits_head (auto_routers_ingress_nodes_in_1_flit_bits_head),
.auto_ingress_nodes_in_1_flit_bits_tail (auto_routers_ingress_nodes_in_1_flit_bits_tail),
.auto_ingress_nodes_in_1_flit_bits_payload (auto_routers_ingress_nodes_in_1_flit_bits_payload),
.auto_ingress_nodes_in_1_flit_bits_egress_id (auto_routers_ingress_nodes_in_1_flit_bits_egress_id),
.auto_ingress_nodes_in_0_flit_valid (auto_routers_ingress_nodes_in_0_flit_valid),
.auto_source_nodes_out_flit_0_valid (auto_routers_source_nodes_out_flit_0_valid),
.auto_source_nodes_out_flit_0_bits_head (auto_routers_source_nodes_out_flit_0_bits_head),
.auto_source_nodes_out_flit_0_bits_tail (auto_routers_source_nodes_out_flit_0_bits_tail),
.auto_source_nodes_out_flit_0_bits_payload (auto_routers_source_nodes_out_flit_0_bits_payload),
.auto_source_nodes_out_flit_0_bits_flow_vnet_id (auto_routers_source_nodes_out_flit_0_bits_flow_vnet_id),
.auto_source_nodes_out_flit_0_bits_flow_ingress_node (auto_routers_source_nodes_out_flit_0_bits_flow_ingress_node),
.auto_source_nodes_out_flit_0_bits_flow_ingress_node_id (auto_routers_source_nodes_out_flit_0_bits_flow_ingress_node_id),
.auto_source_nodes_out_flit_0_bits_flow_egress_node (auto_routers_source_nodes_out_flit_0_bits_flow_egress_node),
.auto_source_nodes_out_flit_0_bits_flow_egress_node_id (auto_routers_source_nodes_out_flit_0_bits_flow_egress_node_id),
.auto_source_nodes_out_flit_0_bits_virt_channel_id (auto_routers_source_nodes_out_flit_0_bits_virt_channel_id),
.auto_source_nodes_out_credit_return (auto_routers_source_nodes_out_credit_return),
.auto_source_nodes_out_vc_free (auto_routers_source_nodes_out_vc_free),
.auto_dest_nodes_in_flit_0_valid (auto_routers_dest_nodes_in_flit_0_valid),
.auto_dest_nodes_in_flit_0_bits_head (auto_routers_dest_nodes_in_flit_0_bits_head),
.auto_dest_nodes_in_flit_0_bits_tail (auto_routers_dest_nodes_in_flit_0_bits_tail),
.auto_dest_nodes_in_flit_0_bits_payload (auto_routers_dest_nodes_in_flit_0_bits_payload),
.auto_dest_nodes_in_flit_0_bits_flow_vnet_id (auto_routers_dest_nodes_in_flit_0_bits_flow_vnet_id),
.auto_dest_nodes_in_flit_0_bits_flow_ingress_node (auto_routers_dest_nodes_in_flit_0_bits_flow_ingress_node),
.auto_dest_nodes_in_flit_0_bits_flow_ingress_node_id (auto_routers_dest_nodes_in_flit_0_bits_flow_ingress_node_id),
.auto_dest_nodes_in_flit_0_bits_flow_egress_node (auto_routers_dest_nodes_in_flit_0_bits_flow_egress_node),
.auto_dest_nodes_in_flit_0_bits_flow_egress_node_id (auto_routers_dest_nodes_in_flit_0_bits_flow_egress_node_id),
.auto_dest_nodes_in_flit_0_bits_virt_channel_id (auto_routers_dest_nodes_in_flit_0_bits_virt_channel_id),
.auto_dest_nodes_in_credit_return (auto_routers_dest_nodes_in_credit_return),
.auto_dest_nodes_in_vc_free (auto_routers_dest_nodes_in_vc_free)
); // @[NoC.scala:67:22]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
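// Hedged usage sketch (added; `DelayTwo` is hypothetical, not upstream code):
// delay a Bool by two register stages, each resetting to false.B, and suggest
// the name "stage" for the generated registers.
class DelayTwo extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  io.q := ShiftRegInit(io.d, n = 2, init = false.B, name = Some("stage"))
}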
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
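// Hedged usage sketch (added; `TimingPipe` is hypothetical). AsyncResetShiftReg
// builds a depth-N pipeline out of asynchronously-reset register vectors; here a
// 4-bit value is delayed by three stages that all reset to 0:
class TimingPipe extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(4.W))
    val q = Output(UInt(4.W))
  })
  io.q := AsyncResetShiftReg(io.d, depth = 3, init = 0, name = Some("timing_pipe"))
}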
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- reset type determined by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
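// Hedged usage sketch (added; `CdcIn` is hypothetical). The common use is a
// multi-stage metastability synchronizer for a single asynchronous input bit,
// with every stage asynchronously reset to 0:
class CdcIn extends Module {
  val io = IO(new Bundle {
    val async_d = Input(Bool())
    val sync_q  = Output(Bool())
  })
  io.sync_q := AsyncResetSynchronizerShiftReg(io.async_d, sync = 3, init = 0, name = Some("cdc_sync"))
}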
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unnecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
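// Hedged usage sketch (added; `BusCrossing` is hypothetical). ClockCrossingReg is
// a single-deep capture register for multi-bit data crossing clock domains,
// typically qualified by an enable asserted only when the data is known stable:
class BusCrossing extends Module {
  val io = IO(new Bundle {
    val d       = Input(UInt(8.W))
    val capture = Input(Bool())
    val q       = Output(UInt(8.W))
  })
  io.q := ClockCrossingReg(io.d, en = io.capture, doInit = false, name = Some("cdc_capture"))
}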
| module AsyncResetSynchronizerShiftReg_w1_d3_i0_65( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
output io_q // @[ShiftReg.scala:36:14]
);
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire io_d = 1'h1; // @[SynchronizerReg.scala:80:7, :87:41]
wire _output_T_1 = 1'h1; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_97 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racey.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
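// Hedged usage sketch (added; `CycleWatchdog` and the plusarg names are made up
// for illustration). PlusArg reads a runtime +arg in simulation, and
// PlusArg.timeout asserts when a counter reaches the supplied value:
class CycleWatchdog extends Module {
  val io = IO(new Bundle { val cycle = Output(UInt(32.W)) })
  val cycles = RegInit(0.U(32.W))
  cycles := cycles + 1.U
  io.cycle := cycles
  // +max_cycles=N on the simulator command line arms the timeout; the default of 0 never asserts
  PlusArg.timeout("max_cycles", docstring = "kill the simulation after this many cycles")(cycles)
}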
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File Nodes.scala:
package constellation.channel
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Parameters, Field}
import freechips.rocketchip.diplomacy._
case class EmptyParams()
case class ChannelEdgeParams(cp: ChannelParams, p: Parameters)
object ChannelImp extends SimpleNodeImp[EmptyParams, ChannelParams, ChannelEdgeParams, Channel] {
def edge(pd: EmptyParams, pu: ChannelParams, p: Parameters, sourceInfo: SourceInfo) = {
ChannelEdgeParams(pu, p)
}
def bundle(e: ChannelEdgeParams) = new Channel(e.cp)(e.p)
def render(e: ChannelEdgeParams) = if (e.cp.possibleFlows.size == 0) {
RenderedEdge(colour = "ffffff", label = "X")
} else {
RenderedEdge(colour = "#0000ff", label = e.cp.payloadBits.toString)
}
override def monitor(bundle: Channel, edge: ChannelEdgeParams): Unit = {
val monitor = Module(new NoCMonitor(edge.cp)(edge.p))
monitor.io.in := bundle
}
// TODO: Add nodepath stuff? override def mixO, override def mixI
}
case class ChannelSourceNode(val destId: Int)(implicit valName: ValName) extends SourceNode(ChannelImp)(Seq(EmptyParams()))
case class ChannelDestNode(val destParams: ChannelParams)(implicit valName: ValName) extends SinkNode(ChannelImp)(Seq(destParams))
case class ChannelAdapterNode(
slaveFn: ChannelParams => ChannelParams = { d => d })(
implicit valName: ValName) extends AdapterNode(ChannelImp)((e: EmptyParams) => e, slaveFn)
case class ChannelIdentityNode()(implicit valName: ValName) extends IdentityNode(ChannelImp)()
case class ChannelEphemeralNode()(implicit valName: ValName) extends EphemeralNode(ChannelImp)()
case class IngressChannelEdgeParams(cp: IngressChannelParams, p: Parameters)
case class EgressChannelEdgeParams(cp: EgressChannelParams, p: Parameters)
object IngressChannelImp extends SimpleNodeImp[EmptyParams, IngressChannelParams, IngressChannelEdgeParams, IngressChannel] {
def edge(pd: EmptyParams, pu: IngressChannelParams, p: Parameters, sourceInfo: SourceInfo) = {
IngressChannelEdgeParams(pu, p)
}
def bundle(e: IngressChannelEdgeParams) = new IngressChannel(e.cp)(e.p)
def render(e: IngressChannelEdgeParams) = if (e.cp.possibleFlows.size == 0) {
RenderedEdge(colour = "ffffff", label = "X")
} else {
RenderedEdge(colour = "#00ff00", label = e.cp.payloadBits.toString)
}
}
object EgressChannelImp extends SimpleNodeImp[EmptyParams, EgressChannelParams, EgressChannelEdgeParams, EgressChannel] {
def edge(pd: EmptyParams, pu: EgressChannelParams, p: Parameters, sourceInfo: SourceInfo) = {
EgressChannelEdgeParams(pu, p)
}
def bundle(e: EgressChannelEdgeParams) = new EgressChannel(e.cp)(e.p)
def render(e: EgressChannelEdgeParams) = if (e.cp.possibleFlows.size == 0) {
RenderedEdge(colour = "ffffff", label = "X")
} else {
RenderedEdge(colour = "#ff0000", label = e.cp.payloadBits.toString)
}
}
case class IngressChannelSourceNode(val destId: Int)(implicit valName: ValName) extends SourceNode(IngressChannelImp)(Seq(EmptyParams()))
case class IngressChannelDestNode(val destParams: IngressChannelParams)(implicit valName: ValName) extends SinkNode(IngressChannelImp)(Seq(destParams))
case class EgressChannelSourceNode(val egressId: Int)(implicit valName: ValName) extends SourceNode(EgressChannelImp)(Seq(EmptyParams()))
case class EgressChannelDestNode(val destParams: EgressChannelParams)(implicit valName: ValName) extends SinkNode(EgressChannelImp)(Seq(destParams))
case class IngressChannelAdapterNode(
slaveFn: IngressChannelParams => IngressChannelParams = { d => d })(
implicit valName: ValName) extends AdapterNode(IngressChannelImp)(m => m, slaveFn)
case class EgressChannelAdapterNode(
slaveFn: EgressChannelParams => EgressChannelParams = { d => d })(
implicit valName: ValName) extends AdapterNode(EgressChannelImp)(m => m, slaveFn)
case class IngressChannelIdentityNode()(implicit valName: ValName) extends IdentityNode(IngressChannelImp)()
case class EgressChannelIdentityNode()(implicit valName: ValName) extends IdentityNode(EgressChannelImp)()
case class IngressChannelEphemeralNode()(implicit valName: ValName) extends EphemeralNode(IngressChannelImp)()
case class EgressChannelEphemeralNode()(implicit valName: ValName) extends EphemeralNode(EgressChannelImp)()
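// Hedged illustration (added; the node names are hypothetical). These node types
// are bound with the usual diplomacy operators, sink := source, optionally with
// adapter/widget nodes composed in between:
//   dstRouter.destNode := srcRouter.sourceNode   // router-to-router channel
//   egressSinkNode := router.egressNode          // terminal egress channel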
File Router.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{RoutingRelation}
import constellation.noc.{HasNoCParams}
case class UserRouterParams(
// Payload width. Must match payload width on all channels attached to this routing node
payloadBits: Int = 64,
// Combines SA and ST stages (removes pipeline register)
combineSAST: Boolean = false,
// Combines RC and VA stages (removes pipeline register)
combineRCVA: Boolean = false,
// Adds combinational path from SA to VA
coupleSAVA: Boolean = false,
vcAllocator: VCAllocatorParams => Parameters => VCAllocator = (vP) => (p) => new RotatingSingleVCAllocator(vP)(p)
)
case class RouterParams(
nodeId: Int,
nIngress: Int,
nEgress: Int,
user: UserRouterParams
)
trait HasRouterOutputParams {
def outParams: Seq[ChannelParams]
def egressParams: Seq[EgressChannelParams]
def allOutParams = outParams ++ egressParams
def nOutputs = outParams.size
def nEgress = egressParams.size
def nAllOutputs = allOutParams.size
}
trait HasRouterInputParams {
def inParams: Seq[ChannelParams]
def ingressParams: Seq[IngressChannelParams]
def allInParams = inParams ++ ingressParams
def nInputs = inParams.size
def nIngress = ingressParams.size
def nAllInputs = allInParams.size
}
trait HasRouterParams
{
def routerParams: RouterParams
def nodeId = routerParams.nodeId
def payloadBits = routerParams.user.payloadBits
}
class DebugBundle(val nIn: Int) extends Bundle {
val va_stall = Vec(nIn, UInt())
val sa_stall = Vec(nIn, UInt())
}
class Router(
val routerParams: RouterParams,
preDiplomaticInParams: Seq[ChannelParams],
preDiplomaticIngressParams: Seq[IngressChannelParams],
outDests: Seq[Int],
egressIds: Seq[Int]
)(implicit p: Parameters) extends LazyModule with HasNoCParams with HasRouterParams {
val allPreDiplomaticInParams = preDiplomaticInParams ++ preDiplomaticIngressParams
val destNodes = preDiplomaticInParams.map(u => ChannelDestNode(u))
val sourceNodes = outDests.map(u => ChannelSourceNode(u))
val ingressNodes = preDiplomaticIngressParams.map(u => IngressChannelDestNode(u))
val egressNodes = egressIds.map(u => EgressChannelSourceNode(u))
val debugNode = BundleBridgeSource(() => new DebugBundle(allPreDiplomaticInParams.size))
val ctrlNode = if (hasCtrl) Some(BundleBridgeSource(() => new RouterCtrlBundle)) else None
def inParams = module.inParams
def outParams = module.outParams
def ingressParams = module.ingressParams
def egressParams = module.egressParams
lazy val module = new LazyModuleImp(this) with HasRouterInputParams with HasRouterOutputParams {
val (io_in, edgesIn) = destNodes.map(_.in(0)).unzip
val (io_out, edgesOut) = sourceNodes.map(_.out(0)).unzip
val (io_ingress, edgesIngress) = ingressNodes.map(_.in(0)).unzip
val (io_egress, edgesEgress) = egressNodes.map(_.out(0)).unzip
val io_debug = debugNode.out(0)._1
val inParams = edgesIn.map(_.cp)
val outParams = edgesOut.map(_.cp)
val ingressParams = edgesIngress.map(_.cp)
val egressParams = edgesEgress.map(_.cp)
allOutParams.foreach(u => require(u.srcId == nodeId && u.payloadBits == routerParams.user.payloadBits))
allInParams.foreach(u => require(u.destId == nodeId && u.payloadBits == routerParams.user.payloadBits))
require(nIngress == routerParams.nIngress)
require(nEgress == routerParams.nEgress)
require(nAllInputs >= 1)
require(nAllOutputs >= 1)
require(nodeId < (1 << nodeIdBits))
val input_units = inParams.zipWithIndex.map { case (u,i) =>
Module(new InputUnit(u, outParams, egressParams,
routerParams.user.combineRCVA, routerParams.user.combineSAST))
.suggestName(s"input_unit_${i}_from_${u.srcId}") }
val ingress_units = ingressParams.zipWithIndex.map { case (u,i) =>
Module(new IngressUnit(i, u, outParams, egressParams,
routerParams.user.combineRCVA, routerParams.user.combineSAST))
.suggestName(s"ingress_unit_${i+nInputs}_from_${u.ingressId}") }
val all_input_units = input_units ++ ingress_units
val output_units = outParams.zipWithIndex.map { case (u,i) =>
Module(new OutputUnit(inParams, ingressParams, u))
.suggestName(s"output_unit_${i}_to_${u.destId}")}
val egress_units = egressParams.zipWithIndex.map { case (u,i) =>
Module(new EgressUnit(routerParams.user.coupleSAVA && all_input_units.size == 1,
routerParams.user.combineSAST,
inParams, ingressParams, u))
.suggestName(s"egress_unit_${i+nOutputs}_to_${u.egressId}")}
val all_output_units = output_units ++ egress_units
val switch = Module(new Switch(routerParams, inParams, outParams, ingressParams, egressParams))
val switch_allocator = Module(new SwitchAllocator(routerParams, inParams, outParams, ingressParams, egressParams))
val vc_allocator = Module(routerParams.user.vcAllocator(
VCAllocatorParams(routerParams, inParams, outParams, ingressParams, egressParams)
)(p))
val route_computer = Module(new RouteComputer(routerParams, inParams, outParams, ingressParams, egressParams))
val fires_count = WireInit(PopCount(vc_allocator.io.req.map(_.fire)))
dontTouch(fires_count)
(io_in zip input_units ).foreach { case (i,u) => u.io.in <> i }
(io_ingress zip ingress_units).foreach { case (i,u) => u.io.in <> i.flit }
(output_units zip io_out ).foreach { case (u,o) => o <> u.io.out }
(egress_units zip io_egress).foreach { case (u,o) => o.flit <> u.io.out }
(route_computer.io.req zip all_input_units).foreach {
case (i,u) => i <> u.io.router_req }
(all_input_units zip route_computer.io.resp).foreach {
case (u,o) => u.io.router_resp <> o }
(vc_allocator.io.req zip all_input_units).foreach {
case (i,u) => i <> u.io.vcalloc_req }
(all_input_units zip vc_allocator.io.resp).foreach {
case (u,o) => u.io.vcalloc_resp <> o }
(all_output_units zip vc_allocator.io.out_allocs).foreach {
case (u,a) => u.io.allocs <> a }
(vc_allocator.io.channel_status zip all_output_units).foreach {
case (a,u) => a := u.io.channel_status }
all_input_units.foreach(in => all_output_units.zipWithIndex.foreach { case (out,outIdx) =>
in.io.out_credit_available(outIdx) := out.io.credit_available
})
(all_input_units zip switch_allocator.io.req).foreach {
case (u,r) => r <> u.io.salloc_req }
(all_output_units zip switch_allocator.io.credit_alloc).foreach {
case (u,a) => u.io.credit_alloc := a }
(switch.io.in zip all_input_units).foreach {
case (i,u) => i <> u.io.out }
(all_output_units zip switch.io.out).foreach {
case (u,o) => u.io.in <> o }
switch.io.sel := (if (routerParams.user.combineSAST) {
switch_allocator.io.switch_sel
} else {
RegNext(switch_allocator.io.switch_sel)
})
if (hasCtrl) {
val io_ctrl = ctrlNode.get.out(0)._1
val ctrl = Module(new RouterControlUnit(routerParams, inParams, outParams, ingressParams, egressParams))
io_ctrl <> ctrl.io.ctrl
(all_input_units zip ctrl.io.in_block ).foreach { case (l,r) => l.io.block := r }
(all_input_units zip ctrl.io.in_fire ).foreach { case (l,r) => r := l.io.out.map(_.valid) }
} else {
input_units.foreach(_.io.block := false.B)
ingress_units.foreach(_.io.block := false.B)
}
(io_debug.va_stall zip all_input_units.map(_.io.debug.va_stall)).map { case (l,r) => l := r }
(io_debug.sa_stall zip all_input_units.map(_.io.debug.sa_stall)).map { case (l,r) => l := r }
val debug_tsc = RegInit(0.U(64.W))
debug_tsc := debug_tsc + 1.U
val debug_sample = RegInit(0.U(64.W))
debug_sample := debug_sample + 1.U
val sample_rate = PlusArg("noc_util_sample_rate", width=20)
when (debug_sample === sample_rate - 1.U) { debug_sample := 0.U }
def sample(fire: Bool, s: String) = {
val util_ctr = RegInit(0.U(64.W))
val fired = RegInit(false.B)
util_ctr := util_ctr + fire
fired := fired || fire
when (sample_rate =/= 0.U && debug_sample === sample_rate - 1.U && fired) {
val fmtStr = s"nocsample %d $s %d\n"
printf(fmtStr, debug_tsc, util_ctr);
fired := fire
}
}
destNodes.map(_.in(0)).foreach { case (in, edge) => in.flit.map { f =>
sample(f.fire, s"${edge.cp.srcId} $nodeId")
} }
ingressNodes.map(_.in(0)).foreach { case (in, edge) =>
sample(in.flit.fire, s"i${edge.cp.asInstanceOf[IngressChannelParams].ingressId} $nodeId")
}
egressNodes.map(_.out(0)).foreach { case (out, edge) =>
sample(out.flit.fire, s"$nodeId e${edge.cp.asInstanceOf[EgressChannelParams].egressId}")
}
}
}
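The sample helper near the end of the router implementation above counts flit fires per PlusArg-controlled window and prints a "nocsample" line whenever the window elapses. Below is a minimal, self-contained sketch of that counter-and-report pattern; the module name, IO names, and the fixed 20-bit sample-rate input are illustrative assumptions, not part of the original design.

import chisel3._
import chisel3.util._

// Sketch of the periodic utilization sampler pattern used by Router.sample.
// `fire` is a hypothetical event strobe; `sampleRate` would normally come from a PlusArg.
class UtilSampler extends Module {
  val io = IO(new Bundle {
    val fire       = Input(Bool())
    val sampleRate = Input(UInt(20.W))
  })
  val tsc     = RegInit(0.U(64.W)) // free-running timestamp counter
  val window  = RegInit(0.U(64.W)) // position within the current sample window
  val utilCtr = RegInit(0.U(64.W)) // events observed so far
  val fired   = RegInit(false.B)   // any event seen since the last report?
  tsc     := tsc + 1.U
  window  := Mux(window === io.sampleRate - 1.U, 0.U, window + 1.U)
  utilCtr := utilCtr + io.fire
  fired   := fired || io.fire
  when (io.sampleRate =/= 0.U && window === io.sampleRate - 1.U && fired) {
    printf("nocsample %d util %d\n", tsc, utilCtr)
    fired := io.fire // restart the window with the current event, mirroring the original
  }
}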
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** instantiate this [[LazyModule]], returning its [[AutoBundle]] and the unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
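The scaladoc in LazyModuleImp.scala above explains how instantiate() pairs up matching Dangles and punches the unmatched ones out as the auto AutoBundle. As a hedged sketch of the usual client-side pattern (class and signal names are illustrative, and the imports assume the standalone diplomacy package paths shown in this file), a LazyModule defers its hardware to a LazyModuleImp like this:

import chisel3._
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp}

// Sketch only: hardware is described lazily and elaborated in Diplomacy's second phase.
class ExampleLazyModule(implicit p: Parameters) extends LazyModule {
  // Diplomatic nodes would normally be declared here, before LazyModule() is applied.
  lazy val module = new LazyModuleImp(this) {
    // Ordinary Chisel goes here; any unmatched Dangles become fields of the `auto` IO.
    val counter = RegInit(0.U(8.W))
    counter := counter + 1.U
  }
}
// Elaboration (in an enclosing design): val top = LazyModule(new ExampleLazyModule()); Module(top.module)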
| module Router_8( // @[Router.scala:89:25]
input clock, // @[Router.scala:89:25]
input reset, // @[Router.scala:89:25]
output [3:0] auto_debug_out_va_stall_0, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_debug_out_va_stall_1, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_debug_out_va_stall_2, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_debug_out_sa_stall_0, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_debug_out_sa_stall_1, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_debug_out_sa_stall_2, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_2_flit_valid, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_2_flit_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_2_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
output [72:0] auto_egress_nodes_out_2_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input auto_egress_nodes_out_1_flit_ready, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_1_flit_valid, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_1_flit_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_1_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
output [72:0] auto_egress_nodes_out_1_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input auto_egress_nodes_out_0_flit_ready, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_0_flit_valid, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_0_flit_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_0_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
output [72:0] auto_egress_nodes_out_0_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
output auto_ingress_nodes_in_1_flit_ready, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_1_flit_valid, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_1_flit_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_1_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
input [72:0] auto_ingress_nodes_in_1_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_ingress_nodes_in_1_flit_bits_egress_id, // @[LazyModuleImp.scala:107:25]
output auto_ingress_nodes_in_0_flit_ready, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_0_flit_valid, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_0_flit_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_0_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
input [72:0] auto_ingress_nodes_in_0_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_ingress_nodes_in_0_flit_bits_egress_id, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_flit_0_valid, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
output [72:0] auto_source_nodes_out_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_source_nodes_out_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_source_nodes_out_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_source_nodes_out_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
input [9:0] auto_source_nodes_out_credit_return, // @[LazyModuleImp.scala:107:25]
input [9:0] auto_source_nodes_out_vc_free, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_flit_0_valid, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
input [72:0] auto_dest_nodes_in_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_dest_nodes_in_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_dest_nodes_in_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_dest_nodes_in_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
output [9:0] auto_dest_nodes_in_credit_return, // @[LazyModuleImp.scala:107:25]
output [9:0] auto_dest_nodes_in_vc_free // @[LazyModuleImp.scala:107:25]
);
wire [19:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire _route_computer_io_resp_2_vc_sel_0_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_0_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_0_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_0_3; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_0_4; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_0_5; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_0_6; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_0_7; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_0_8; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_0_9; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_0_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_0_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_0_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_0_3; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_0_4; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_0_5; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_0_6; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_0_7; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_0_8; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_0_9; // @[Router.scala:136:32]
wire _route_computer_io_resp_0_vc_sel_0_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_0_vc_sel_0_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_0_vc_sel_0_4; // @[Router.scala:136:32]
wire _route_computer_io_resp_0_vc_sel_0_6; // @[Router.scala:136:32]
wire _route_computer_io_resp_0_vc_sel_0_8; // @[Router.scala:136:32]
wire _vc_allocator_io_req_2_ready; // @[Router.scala:133:30]
wire _vc_allocator_io_req_1_ready; // @[Router.scala:133:30]
wire _vc_allocator_io_req_0_ready; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_3_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_2_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_1_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_1; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_3; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_4; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_5; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_6; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_7; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_8; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_9; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_3_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_2_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_1_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_1; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_3; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_4; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_5; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_6; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_7; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_8; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_9; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_0_vc_sel_3_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_0_vc_sel_2_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_0_vc_sel_1_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_0_vc_sel_0_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_0_vc_sel_0_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_0_vc_sel_0_4; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_0_vc_sel_0_6; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_0_vc_sel_0_8; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_3_0_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_2_0_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_1_0_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_2_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_3_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_6_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_7_alloc; // @[Router.scala:133:30]
wire _switch_allocator_io_req_2_0_ready; // @[Router.scala:132:34]
wire _switch_allocator_io_req_1_0_ready; // @[Router.scala:132:34]
wire _switch_allocator_io_req_0_0_ready; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_3_0_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_3_0_tail; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_2_0_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_2_0_tail; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_1_0_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_1_0_tail; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_2_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_3_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_6_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_7_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_3_0_2_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_3_0_1_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_3_0_0_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_2_0_2_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_2_0_1_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_2_0_0_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_1_0_2_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_1_0_1_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_1_0_0_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_0_0_2_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_0_0_1_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_0_0_0_0; // @[Router.scala:132:34]
wire _switch_io_out_3_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_3_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_3_0_bits_tail; // @[Router.scala:131:24]
wire [72:0] _switch_io_out_3_0_bits_payload; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_3_0_bits_flow_ingress_node; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_3_0_bits_flow_ingress_node_id; // @[Router.scala:131:24]
wire _switch_io_out_2_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_2_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_2_0_bits_tail; // @[Router.scala:131:24]
wire [72:0] _switch_io_out_2_0_bits_payload; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_2_0_bits_flow_ingress_node; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_2_0_bits_flow_ingress_node_id; // @[Router.scala:131:24]
wire _switch_io_out_1_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_1_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_1_0_bits_tail; // @[Router.scala:131:24]
wire [72:0] _switch_io_out_1_0_bits_payload; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_1_0_bits_flow_ingress_node; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_1_0_bits_flow_ingress_node_id; // @[Router.scala:131:24]
wire _switch_io_out_0_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_0_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_0_0_bits_tail; // @[Router.scala:131:24]
wire [72:0] _switch_io_out_0_0_bits_payload; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_0_0_bits_flow_vnet_id; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_0_0_bits_flow_ingress_node; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_0_0_bits_flow_ingress_node_id; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_0_0_bits_flow_egress_node; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_0_0_bits_flow_egress_node_id; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_0_0_bits_virt_channel_id; // @[Router.scala:131:24]
wire _egress_unit_3_to_24_io_credit_available_0; // @[Router.scala:125:13]
wire _egress_unit_3_to_24_io_channel_status_0_occupied; // @[Router.scala:125:13]
wire _egress_unit_3_to_24_io_out_valid; // @[Router.scala:125:13]
wire _egress_unit_2_to_23_io_credit_available_0; // @[Router.scala:125:13]
wire _egress_unit_2_to_23_io_channel_status_0_occupied; // @[Router.scala:125:13]
wire _egress_unit_2_to_23_io_out_valid; // @[Router.scala:125:13]
wire _egress_unit_1_to_22_io_credit_available_0; // @[Router.scala:125:13]
wire _egress_unit_1_to_22_io_channel_status_0_occupied; // @[Router.scala:125:13]
wire _egress_unit_1_to_22_io_out_valid; // @[Router.scala:125:13]
wire _output_unit_0_to_0_io_credit_available_2; // @[Router.scala:122:13]
wire _output_unit_0_to_0_io_credit_available_3; // @[Router.scala:122:13]
wire _output_unit_0_to_0_io_credit_available_6; // @[Router.scala:122:13]
wire _output_unit_0_to_0_io_credit_available_7; // @[Router.scala:122:13]
wire _output_unit_0_to_0_io_channel_status_2_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_0_io_channel_status_3_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_0_io_channel_status_6_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_0_io_channel_status_7_occupied; // @[Router.scala:122:13]
wire [3:0] _ingress_unit_2_from_24_io_router_req_bits_flow_egress_node; // @[Router.scala:116:13]
wire [2:0] _ingress_unit_2_from_24_io_router_req_bits_flow_egress_node_id; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_vcalloc_req_valid; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_vcalloc_req_bits_vc_sel_3_0; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_vcalloc_req_bits_vc_sel_2_0; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_vcalloc_req_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_vcalloc_req_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_vcalloc_req_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_vcalloc_req_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_vcalloc_req_bits_vc_sel_0_3; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_vcalloc_req_bits_vc_sel_0_4; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_vcalloc_req_bits_vc_sel_0_5; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_vcalloc_req_bits_vc_sel_0_6; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_vcalloc_req_bits_vc_sel_0_7; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_vcalloc_req_bits_vc_sel_0_8; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_vcalloc_req_bits_vc_sel_0_9; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_salloc_req_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_salloc_req_0_bits_vc_sel_3_0; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_salloc_req_0_bits_vc_sel_2_0; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_salloc_req_0_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_salloc_req_0_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_salloc_req_0_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_salloc_req_0_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_salloc_req_0_bits_vc_sel_0_3; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_salloc_req_0_bits_vc_sel_0_4; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_salloc_req_0_bits_vc_sel_0_5; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_salloc_req_0_bits_vc_sel_0_6; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_salloc_req_0_bits_vc_sel_0_7; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_salloc_req_0_bits_vc_sel_0_8; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_salloc_req_0_bits_vc_sel_0_9; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_salloc_req_0_bits_tail; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_out_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_out_0_bits_flit_head; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_out_0_bits_flit_tail; // @[Router.scala:116:13]
wire [72:0] _ingress_unit_2_from_24_io_out_0_bits_flit_payload; // @[Router.scala:116:13]
wire [2:0] _ingress_unit_2_from_24_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:116:13]
wire [3:0] _ingress_unit_2_from_24_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:116:13]
wire [2:0] _ingress_unit_2_from_24_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:116:13]
wire [3:0] _ingress_unit_2_from_24_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:116:13]
wire [2:0] _ingress_unit_2_from_24_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:116:13]
wire [3:0] _ingress_unit_2_from_24_io_out_0_bits_out_virt_channel; // @[Router.scala:116:13]
wire _ingress_unit_2_from_24_io_in_ready; // @[Router.scala:116:13]
wire [3:0] _ingress_unit_1_from_23_io_router_req_bits_flow_egress_node; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_vcalloc_req_valid; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_vcalloc_req_bits_vc_sel_3_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_vcalloc_req_bits_vc_sel_2_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_vcalloc_req_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_vcalloc_req_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_vcalloc_req_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_vcalloc_req_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_vcalloc_req_bits_vc_sel_0_3; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_vcalloc_req_bits_vc_sel_0_4; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_vcalloc_req_bits_vc_sel_0_5; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_vcalloc_req_bits_vc_sel_0_6; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_vcalloc_req_bits_vc_sel_0_7; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_vcalloc_req_bits_vc_sel_0_8; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_vcalloc_req_bits_vc_sel_0_9; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_salloc_req_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_salloc_req_0_bits_vc_sel_3_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_salloc_req_0_bits_vc_sel_2_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_salloc_req_0_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_salloc_req_0_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_salloc_req_0_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_salloc_req_0_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_salloc_req_0_bits_vc_sel_0_3; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_salloc_req_0_bits_vc_sel_0_4; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_salloc_req_0_bits_vc_sel_0_5; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_salloc_req_0_bits_vc_sel_0_6; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_salloc_req_0_bits_vc_sel_0_7; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_salloc_req_0_bits_vc_sel_0_8; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_salloc_req_0_bits_vc_sel_0_9; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_salloc_req_0_bits_tail; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_out_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_out_0_bits_flit_head; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_out_0_bits_flit_tail; // @[Router.scala:116:13]
wire [72:0] _ingress_unit_1_from_23_io_out_0_bits_flit_payload; // @[Router.scala:116:13]
wire [2:0] _ingress_unit_1_from_23_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:116:13]
wire [3:0] _ingress_unit_1_from_23_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:116:13]
wire [2:0] _ingress_unit_1_from_23_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:116:13]
wire [3:0] _ingress_unit_1_from_23_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:116:13]
wire [2:0] _ingress_unit_1_from_23_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:116:13]
wire [3:0] _ingress_unit_1_from_23_io_out_0_bits_out_virt_channel; // @[Router.scala:116:13]
wire _ingress_unit_1_from_23_io_in_ready; // @[Router.scala:116:13]
wire [3:0] _input_unit_0_from_7_io_router_req_bits_src_virt_id; // @[Router.scala:112:13]
wire [2:0] _input_unit_0_from_7_io_router_req_bits_flow_vnet_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_0_from_7_io_router_req_bits_flow_ingress_node; // @[Router.scala:112:13]
wire [2:0] _input_unit_0_from_7_io_router_req_bits_flow_ingress_node_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_0_from_7_io_router_req_bits_flow_egress_node; // @[Router.scala:112:13]
wire [2:0] _input_unit_0_from_7_io_router_req_bits_flow_egress_node_id; // @[Router.scala:112:13]
wire _input_unit_0_from_7_io_vcalloc_req_valid; // @[Router.scala:112:13]
wire _input_unit_0_from_7_io_vcalloc_req_bits_vc_sel_3_0; // @[Router.scala:112:13]
wire _input_unit_0_from_7_io_vcalloc_req_bits_vc_sel_2_0; // @[Router.scala:112:13]
wire _input_unit_0_from_7_io_vcalloc_req_bits_vc_sel_1_0; // @[Router.scala:112:13]
wire _input_unit_0_from_7_io_vcalloc_req_bits_vc_sel_0_0; // @[Router.scala:112:13]
wire _input_unit_0_from_7_io_vcalloc_req_bits_vc_sel_0_2; // @[Router.scala:112:13]
wire _input_unit_0_from_7_io_vcalloc_req_bits_vc_sel_0_4; // @[Router.scala:112:13]
wire _input_unit_0_from_7_io_vcalloc_req_bits_vc_sel_0_6; // @[Router.scala:112:13]
wire _input_unit_0_from_7_io_vcalloc_req_bits_vc_sel_0_8; // @[Router.scala:112:13]
wire _input_unit_0_from_7_io_salloc_req_0_valid; // @[Router.scala:112:13]
wire _input_unit_0_from_7_io_salloc_req_0_bits_vc_sel_3_0; // @[Router.scala:112:13]
wire _input_unit_0_from_7_io_salloc_req_0_bits_vc_sel_2_0; // @[Router.scala:112:13]
wire _input_unit_0_from_7_io_salloc_req_0_bits_vc_sel_1_0; // @[Router.scala:112:13]
wire _input_unit_0_from_7_io_salloc_req_0_bits_vc_sel_0_0; // @[Router.scala:112:13]
wire _input_unit_0_from_7_io_salloc_req_0_bits_vc_sel_0_1; // @[Router.scala:112:13]
wire _input_unit_0_from_7_io_salloc_req_0_bits_vc_sel_0_2; // @[Router.scala:112:13]
wire _input_unit_0_from_7_io_salloc_req_0_bits_vc_sel_0_3; // @[Router.scala:112:13]
wire _input_unit_0_from_7_io_salloc_req_0_bits_vc_sel_0_4; // @[Router.scala:112:13]
wire _input_unit_0_from_7_io_salloc_req_0_bits_vc_sel_0_5; // @[Router.scala:112:13]
wire _input_unit_0_from_7_io_salloc_req_0_bits_vc_sel_0_6; // @[Router.scala:112:13]
wire _input_unit_0_from_7_io_salloc_req_0_bits_vc_sel_0_7; // @[Router.scala:112:13]
wire _input_unit_0_from_7_io_salloc_req_0_bits_vc_sel_0_8; // @[Router.scala:112:13]
wire _input_unit_0_from_7_io_salloc_req_0_bits_vc_sel_0_9; // @[Router.scala:112:13]
wire _input_unit_0_from_7_io_salloc_req_0_bits_tail; // @[Router.scala:112:13]
wire _input_unit_0_from_7_io_out_0_valid; // @[Router.scala:112:13]
wire _input_unit_0_from_7_io_out_0_bits_flit_head; // @[Router.scala:112:13]
wire _input_unit_0_from_7_io_out_0_bits_flit_tail; // @[Router.scala:112:13]
wire [72:0] _input_unit_0_from_7_io_out_0_bits_flit_payload; // @[Router.scala:112:13]
wire [2:0] _input_unit_0_from_7_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_0_from_7_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:112:13]
wire [2:0] _input_unit_0_from_7_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_0_from_7_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:112:13]
wire [2:0] _input_unit_0_from_7_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_0_from_7_io_out_0_bits_out_virt_channel; // @[Router.scala:112:13]
wire [1:0] fires_count = {1'h0, _vc_allocator_io_req_0_ready & _input_unit_0_from_7_io_vcalloc_req_valid} + {1'h0, _vc_allocator_io_req_1_ready & _ingress_unit_1_from_23_io_vcalloc_req_valid} + {1'h0, _vc_allocator_io_req_2_ready & _ingress_unit_2_from_24_io_vcalloc_req_valid}; // @[Decoupled.scala:51:35]
reg REG_3_0_2_0; // @[Router.scala:178:14]
reg REG_3_0_1_0; // @[Router.scala:178:14]
reg REG_3_0_0_0; // @[Router.scala:178:14]
reg REG_2_0_2_0; // @[Router.scala:178:14]
reg REG_2_0_1_0; // @[Router.scala:178:14]
reg REG_2_0_0_0; // @[Router.scala:178:14]
reg REG_1_0_2_0; // @[Router.scala:178:14]
reg REG_1_0_1_0; // @[Router.scala:178:14]
reg REG_1_0_0_0; // @[Router.scala:178:14]
reg REG_0_0_2_0; // @[Router.scala:178:14]
reg REG_0_0_1_0; // @[Router.scala:178:14]
reg REG_0_0_0_0; // @[Router.scala:178:14]
reg [63:0] debug_tsc; // @[Router.scala:195:28]
reg [63:0] debug_sample; // @[Router.scala:197:31]
wire _GEN = debug_sample == {44'h0, _plusarg_reader_out - 20'h1}; // @[PlusArg.scala:80:11]
reg [63:0] util_ctr; // @[Router.scala:203:29]
reg fired; // @[Router.scala:204:26]
wire _GEN_0 = (|_plusarg_reader_out) & _GEN; // @[PlusArg.scala:80:11]
wire _GEN_1 = _GEN_0 & fired; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_1; // @[Router.scala:203:29]
reg fired_1; // @[Router.scala:204:26]
wire _GEN_2 = _GEN_0 & fired_1; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_2; // @[Router.scala:203:29]
reg fired_2; // @[Router.scala:204:26]
wire _GEN_3 = _GEN_0 & fired_2; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_3; // @[Router.scala:203:29]
reg fired_3; // @[Router.scala:204:26]
wire _GEN_4 = _GEN_0 & fired_3; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_4; // @[Router.scala:203:29]
reg fired_4; // @[Router.scala:204:26]
wire _GEN_5 = _GEN_0 & fired_4; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_5; // @[Router.scala:203:29]
reg fired_5; // @[Router.scala:204:26]
wire _GEN_6 = _GEN_0 & fired_5; // @[Router.scala:204:26, :207:{33,71}] |
Generate the Verilog code corresponding to the following Chisel files.
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
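DecoupledHelper above gates a transaction on a set of ready/valid-style conditions, and its require message spells out that fire(exclude) drops exactly the signal passed by reference, so a producer's ready can be derived without a combinational loop. A minimal hedged sketch of that use (module and signal names are illustrative):

import chisel3._
import freechips.rocketchip.util.DecoupledHelper

// Sketch only: assert in_ready from every condition except in_valid itself.
class HandshakeExample extends Module {
  val io = IO(new Bundle {
    val in_valid  = Input(Bool())
    val out_ready = Input(Bool())
    val buf_free  = Input(Bool())
    val in_ready  = Output(Bool())
    val do_fire   = Output(Bool())
  })
  val helper = DecoupledHelper(io.in_valid, io.out_ready, io.buf_free)
  io.in_ready := helper.fire(io.in_valid) // exclusion is by reference, as the require message notes
  io.do_fire  := helper.fire()            // all conditions together
}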
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
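The package object above adds UInt helpers such as leftOR (propagate 1s from each set bit toward the MSB), rotateRight, and addWrap (a modular add that assumes both operands are already less than the modulus). A small hedged sketch exercising them (module name, signal names, and widths are illustrative):

import chisel3._
import freechips.rocketchip.util._ // brings the UInt augmentation implicits into scope

// Sketch only: a few of the helpers defined in the package object.
class UIntHelpersExample extends Module {
  val io = IO(new Bundle {
    val x       = Input(UInt(8.W))
    val filled  = Output(UInt(8.W)) // 1s filled upward from every set bit of x
    val rotated = Output(UInt(8.W)) // x rotated right by a constant 3
    val wrapped = Output(UInt(3.W)) // (x[2:0] + 3) mod 6, assuming x[2:0] < 6
  })
  io.filled  := leftOR(io.x)
  io.rotated := io.x.rotateRight(3)
  io.wrapped := io.x(2, 0).addWrap(3.U, 6)
}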
File Metadata.scala:
// See LICENSE.SiFive for license details.
// See LICENSE.Berkeley for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.constants.MemoryOpConstants
import freechips.rocketchip.util._
object ClientStates {
val width = 2
def Nothing = 0.U(width.W)
def Branch = 1.U(width.W)
def Trunk = 2.U(width.W)
def Dirty = 3.U(width.W)
def hasReadPermission(state: UInt): Bool = state > Nothing
def hasWritePermission(state: UInt): Bool = state > Branch
}
object MemoryOpCategories extends MemoryOpConstants {
def wr = Cat(true.B, true.B) // Op actually writes
def wi = Cat(false.B, true.B) // Future op will write
def rd = Cat(false.B, false.B) // Op only reads
def categorize(cmd: UInt): UInt = {
val cat = Cat(isWrite(cmd), isWriteIntent(cmd))
//assert(cat.isOneOf(wr,wi,rd), "Could not categorize command.")
cat
}
}
/** Stores the client-side coherence information,
* such as permissions on the data and whether the data is dirty.
* Its API can be used to make TileLink messages in response to
* memory operations, cache control operations, or Probe messages.
*/
class ClientMetadata extends Bundle {
/** Actual state information stored in this bundle */
val state = UInt(ClientStates.width.W)
/** Metadata equality */
def ===(rhs: UInt): Bool = state === rhs
def ===(rhs: ClientMetadata): Bool = state === rhs.state
def =/=(rhs: ClientMetadata): Bool = !this.===(rhs)
/** Is the block's data present in this cache */
def isValid(dummy: Int = 0): Bool = state > ClientStates.Nothing
/** Determine whether this cmd misses, and the new state (on hit) or param to be sent (on miss) */
private def growStarter(cmd: UInt): (Bool, UInt) = {
import MemoryOpCategories._
import TLPermissions._
import ClientStates._
val c = categorize(cmd)
MuxTLookup(Cat(c, state), (false.B, 0.U), Seq(
//(effect, am now) -> (was a hit, next)
Cat(rd, Dirty) -> (true.B, Dirty),
Cat(rd, Trunk) -> (true.B, Trunk),
Cat(rd, Branch) -> (true.B, Branch),
Cat(wi, Dirty) -> (true.B, Dirty),
Cat(wi, Trunk) -> (true.B, Trunk),
Cat(wr, Dirty) -> (true.B, Dirty),
Cat(wr, Trunk) -> (true.B, Dirty),
//(effect, am now) -> (was a miss, param)
Cat(rd, Nothing) -> (false.B, NtoB),
Cat(wi, Branch) -> (false.B, BtoT),
Cat(wi, Nothing) -> (false.B, NtoT),
Cat(wr, Branch) -> (false.B, BtoT),
Cat(wr, Nothing) -> (false.B, NtoT)))
}
/** Determine what state to go to after miss based on Grant param
* For now, doesn't depend on state (which may have been Probed).
*/
private def growFinisher(cmd: UInt, param: UInt): UInt = {
import MemoryOpCategories._
import TLPermissions._
import ClientStates._
val c = categorize(cmd)
//assert(c === rd || param === toT, "Client was expecting trunk permissions.")
MuxLookup(Cat(c, param), Nothing)(Seq(
//(effect param) -> (next)
Cat(rd, toB) -> Branch,
Cat(rd, toT) -> Trunk,
Cat(wi, toT) -> Trunk,
Cat(wr, toT) -> Dirty))
}
/** Does this cache have permissions on this block sufficient to perform op,
* and what to do next (Acquire message param or updated metadata). */
def onAccess(cmd: UInt): (Bool, UInt, ClientMetadata) = {
val r = growStarter(cmd)
(r._1, r._2, ClientMetadata(r._2))
}
/** Does a secondary miss on the block require another Acquire message */
def onSecondaryAccess(first_cmd: UInt, second_cmd: UInt): (Bool, Bool, UInt, ClientMetadata, UInt) = {
import MemoryOpCategories._
val r1 = growStarter(first_cmd)
val r2 = growStarter(second_cmd)
val needs_second_acq = isWriteIntent(second_cmd) && !isWriteIntent(first_cmd)
val hit_again = r1._1 && r2._1
val dirties = categorize(second_cmd) === wr
val biggest_grow_param = Mux(dirties, r2._2, r1._2)
val dirtiest_state = ClientMetadata(biggest_grow_param)
val dirtiest_cmd = Mux(dirties, second_cmd, first_cmd)
(needs_second_acq, hit_again, biggest_grow_param, dirtiest_state, dirtiest_cmd)
}
/** Metadata change on a returned Grant */
def onGrant(cmd: UInt, param: UInt): ClientMetadata = ClientMetadata(growFinisher(cmd, param))
/** Determine what state to go to based on Probe param */
private def shrinkHelper(param: UInt): (Bool, UInt, UInt) = {
import ClientStates._
import TLPermissions._
MuxTLookup(Cat(param, state), (false.B, 0.U, 0.U), Seq(
//(wanted, am now) -> (hasDirtyData resp, next)
Cat(toT, Dirty) -> (true.B, TtoT, Trunk),
Cat(toT, Trunk) -> (false.B, TtoT, Trunk),
Cat(toT, Branch) -> (false.B, BtoB, Branch),
Cat(toT, Nothing) -> (false.B, NtoN, Nothing),
Cat(toB, Dirty) -> (true.B, TtoB, Branch),
Cat(toB, Trunk) -> (false.B, TtoB, Branch), // Policy: Don't notify on clean downgrade
Cat(toB, Branch) -> (false.B, BtoB, Branch),
Cat(toB, Nothing) -> (false.B, NtoN, Nothing),
Cat(toN, Dirty) -> (true.B, TtoN, Nothing),
Cat(toN, Trunk) -> (false.B, TtoN, Nothing), // Policy: Don't notify on clean downgrade
Cat(toN, Branch) -> (false.B, BtoN, Nothing), // Policy: Don't notify on clean downgrade
Cat(toN, Nothing) -> (false.B, NtoN, Nothing)))
}
/** Translate cache control cmds into Probe param */
private def cmdToPermCap(cmd: UInt): UInt = {
import MemoryOpCategories._
import TLPermissions._
MuxLookup(cmd, toN)(Seq(
M_FLUSH -> toN,
M_PRODUCE -> toB,
M_CLEAN -> toT))
}
def onCacheControl(cmd: UInt): (Bool, UInt, ClientMetadata) = {
val r = shrinkHelper(cmdToPermCap(cmd))
(r._1, r._2, ClientMetadata(r._3))
}
def onProbe(param: UInt): (Bool, UInt, ClientMetadata) = {
val r = shrinkHelper(param)
(r._1, r._2, ClientMetadata(r._3))
}
}
/** Factories for ClientMetadata, including on reset */
object ClientMetadata {
def apply(perm: UInt) = {
val meta = Wire(new ClientMetadata)
meta.state := perm
meta
}
def onReset = ClientMetadata(ClientStates.Nothing)
def maximum = ClientMetadata(ClientStates.Dirty)
}
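// Usage sketch (illustrative only), assuming a `meta: ClientMetadata`, a memory
// command `cmd: UInt`, and an incoming Probe `probe: TLBundleB` are in scope:
//
//   val (is_hit, grow_param, new_meta) = meta.onAccess(cmd)
//   // on a hit: write `new_meta` back into the tag array
//   // on a miss: send an Acquire with param `grow_param`, then update the
//   //            metadata with meta.onGrant(cmd, grant.param) when the Grant returns
//
//   val (has_dirty_data, report_param, probe_meta) = meta.onProbe(probe.param)
//   // respond with ProbeAck(probe, report_param) (ProbeAckData if has_dirty_data)
//   // and write `probe_meta` back into the tag array
//
// This mirrors how the BOOM dcache and probe unit below consult the metadata.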
File dcache.scala:
//******************************************************************************
// Ported from Rocket-Chip
// See LICENSE.Berkeley and LICENSE.SiFive in Rocket-Chip for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v4.lsu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.tile._
import freechips.rocketchip.util._
import freechips.rocketchip.rocket._
import boom.v4.common._
import boom.v4.exu.BrUpdateInfo
import boom.v4.util._
class BoomWritebackUnit(implicit edge: TLEdgeOut, p: Parameters) extends L1HellaCacheModule()(p) {
val io = IO(new Bundle {
val req = Flipped(Decoupled(new WritebackReq(edge.bundle)))
val meta_read = Decoupled(new L1MetaReadReq)
val resp = Output(Bool())
val idx = Output(Valid(UInt()))
val data_req = Decoupled(new L1DataReadReq)
val data_resp = Input(UInt(encRowBits.W))
val mem_grant = Input(Bool())
val release = Decoupled(new TLBundleC(edge.bundle))
val lsu_release = Decoupled(new TLBundleC(edge.bundle))
})
val req = Reg(new WritebackReq(edge.bundle))
val s_invalid :: s_fill_buffer :: s_lsu_release :: s_active :: s_grant :: Nil = Enum(5)
val state = RegInit(s_invalid)
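// State flow (descriptive summary of the logic below): s_invalid accepts a request,
// s_fill_buffer streams the victim line out of the data array into wb_buffer,
// s_lsu_release hands a ProbeAck-shaped copy of the release to the LSU,
// s_active sends the Release/ProbeAck beats on channel C, and s_grant
// (voluntary writebacks only) waits for the ReleaseAck before returning to idle.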
val r1_data_req_fired = RegInit(false.B)
val r2_data_req_fired = RegInit(false.B)
val r1_data_req_cnt = Reg(UInt(log2Up(refillCycles+1).W))
val r2_data_req_cnt = Reg(UInt(log2Up(refillCycles+1).W))
val data_req_cnt = RegInit(0.U(log2Up(refillCycles+1).W))
val (_, last_beat, all_beats_done, beat_count) = edge.count(io.release)
val wb_buffer = Reg(Vec(refillCycles, UInt(encRowBits.W)))
val acked = RegInit(false.B)
io.idx.valid := state =/= s_invalid
io.idx.bits := req.idx
io.release.valid := false.B
io.release.bits := DontCare
io.req.ready := false.B
io.meta_read.valid := false.B
io.meta_read.bits := DontCare
io.data_req.valid := false.B
io.data_req.bits := DontCare
io.resp := false.B
io.lsu_release.valid := false.B
io.lsu_release.bits := DontCare
val r_address = Cat(req.tag, req.idx) << blockOffBits
val id = cfg.nMSHRs
val probeResponse = edge.ProbeAck(
fromSource = req.source,
toAddress = r_address,
lgSize = lgCacheBlockBytes.U,
reportPermissions = req.param,
data = wb_buffer(data_req_cnt))
val voluntaryRelease = edge.Release(
fromSource = id.U,
toAddress = r_address,
lgSize = lgCacheBlockBytes.U,
shrinkPermissions = req.param,
data = wb_buffer(data_req_cnt))._2
when (state === s_invalid) {
io.req.ready := true.B
when (io.req.fire) {
state := s_fill_buffer
data_req_cnt := 0.U
req := io.req.bits
acked := false.B
}
} .elsewhen (state === s_fill_buffer) {
io.meta_read.valid := data_req_cnt < refillCycles.U
io.meta_read.bits.idx := req.idx
io.meta_read.bits.tag := req.tag
io.data_req.valid := data_req_cnt < refillCycles.U
io.data_req.bits.way_en := req.way_en
io.data_req.bits.addr := (if(refillCycles > 1)
Cat(req.idx, data_req_cnt(log2Up(refillCycles)-1,0))
else req.idx) << rowOffBits
r1_data_req_fired := false.B
r1_data_req_cnt := 0.U
r2_data_req_fired := r1_data_req_fired
r2_data_req_cnt := r1_data_req_cnt
when (io.data_req.fire && io.meta_read.fire) {
r1_data_req_fired := true.B
r1_data_req_cnt := data_req_cnt
data_req_cnt := data_req_cnt + 1.U
}
when (r2_data_req_fired) {
wb_buffer(r2_data_req_cnt) := io.data_resp
when (r2_data_req_cnt === (refillCycles-1).U) {
io.resp := true.B
state := s_lsu_release
data_req_cnt := 0.U
}
}
} .elsewhen (state === s_lsu_release) {
io.lsu_release.valid := true.B
io.lsu_release.bits := probeResponse
when (io.lsu_release.fire) {
state := s_active
}
} .elsewhen (state === s_active) {
io.release.valid := data_req_cnt < refillCycles.U
io.release.bits := Mux(req.voluntary, voluntaryRelease, probeResponse)
when (io.mem_grant) {
acked := true.B
}
when (io.release.fire) {
data_req_cnt := data_req_cnt + 1.U
}
when ((data_req_cnt === (refillCycles-1).U) && io.release.fire) {
state := Mux(req.voluntary, s_grant, s_invalid)
}
} .elsewhen (state === s_grant) {
when (io.mem_grant) {
acked := true.B
}
when (acked) {
state := s_invalid
}
}
}
class BoomProbeUnit(implicit edge: TLEdgeOut, p: Parameters) extends L1HellaCacheModule()(p) {
val io = IO(new Bundle {
val req = Flipped(Decoupled(new TLBundleB(edge.bundle)))
val rep = Decoupled(new TLBundleC(edge.bundle))
val meta_read = Decoupled(new L1MetaReadReq)
val meta_write = Decoupled(new L1MetaWriteReq)
val wb_req = Decoupled(new WritebackReq(edge.bundle))
val way_en = Input(UInt(nWays.W))
val wb_rdy = Input(Bool()) // Is the writeback unit currently busy? If so, we need to retry the meta read when it's done
val mshr_rdy = Input(Bool()) // Is MSHR ready for this request to proceed?
val mshr_wb_rdy = Output(Bool()) // Should we block MSHR writebacks while we finish our own?
val block_state = Input(new ClientMetadata())
val lsu_release = Decoupled(new TLBundleC(edge.bundle))
val state = Output(Valid(UInt(coreMaxAddrBits.W)))
})
val (s_invalid :: s_meta_read :: s_meta_resp :: s_mshr_req ::
s_mshr_resp :: s_lsu_release :: s_release :: s_writeback_req :: s_writeback_resp ::
s_meta_write :: s_meta_write_resp :: Nil) = Enum(11)
val state = RegInit(s_invalid)
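// State flow (descriptive summary of the logic below): read and latch the tag and
// coherence state (s_meta_read/s_meta_resp), then check MSHR and writeback-unit
// readiness (s_mshr_req, retrying the meta read if either is busy). For a dirty hit
// the writeback unit handles the ProbeAckData (s_writeback_req/_resp); otherwise the
// LSU is notified (s_lsu_release) and a dataless ProbeAck is sent (s_release).
// Finally, the metadata is updated on a tag match (s_meta_write/_resp).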
val req = Reg(new TLBundleB(edge.bundle))
val req_idx = req.address(idxMSB, idxLSB)
val req_tag = req.address >> untagBits
val way_en = Reg(UInt())
val tag_matches = way_en.orR
val old_coh = Reg(new ClientMetadata)
val miss_coh = ClientMetadata.onReset
val reply_coh = Mux(tag_matches, old_coh, miss_coh)
val (is_dirty, report_param, new_coh) = reply_coh.onProbe(req.param)
io.state.valid := state =/= s_invalid
io.state.bits := req.address
io.req.ready := state === s_invalid
io.rep.valid := state === s_release
io.rep.bits := edge.ProbeAck(req, report_param)
assert(!io.rep.valid || !edge.hasData(io.rep.bits),
"ProbeUnit should not send ProbeAcks with data, WritebackUnit should handle it")
io.meta_read.valid := state === s_meta_read
io.meta_read.bits.idx := req_idx
io.meta_read.bits.tag := req_tag
io.meta_read.bits.way_en := ~(0.U(nWays.W))
io.meta_write.valid := state === s_meta_write
io.meta_write.bits.way_en := way_en
io.meta_write.bits.idx := req_idx
io.meta_write.bits.tag := req_tag
io.meta_write.bits.data.tag := req_tag
io.meta_write.bits.data.coh := new_coh
io.wb_req.valid := state === s_writeback_req
io.wb_req.bits.source := req.source
io.wb_req.bits.idx := req_idx
io.wb_req.bits.tag := req_tag
io.wb_req.bits.param := report_param
io.wb_req.bits.way_en := way_en
io.wb_req.bits.voluntary := false.B
io.mshr_wb_rdy := !state.isOneOf(s_release, s_writeback_req, s_writeback_resp, s_meta_write, s_meta_write_resp)
io.lsu_release.valid := state === s_lsu_release
io.lsu_release.bits := edge.ProbeAck(req, report_param)
// state === s_invalid
when (state === s_invalid) {
when (io.req.fire) {
state := s_meta_read
req := io.req.bits
}
} .elsewhen (state === s_meta_read) {
when (io.meta_read.fire) {
state := s_meta_resp
}
} .elsewhen (state === s_meta_resp) {
// we need to wait one cycle for the metadata to be read from the array
state := s_mshr_req
} .elsewhen (state === s_mshr_req) {
old_coh := io.block_state
way_en := io.way_en
// if the read didn't go through, we need to retry
state := Mux(io.mshr_rdy && io.wb_rdy, s_mshr_resp, s_meta_read)
} .elsewhen (state === s_mshr_resp) {
state := Mux(tag_matches && is_dirty, s_writeback_req, s_lsu_release)
} .elsewhen (state === s_lsu_release) {
when (io.lsu_release.fire) {
state := s_release
}
} .elsewhen (state === s_release) {
when (io.rep.ready) {
state := Mux(tag_matches, s_meta_write, s_invalid)
}
} .elsewhen (state === s_writeback_req) {
when (io.wb_req.fire) {
state := s_writeback_resp
}
} .elsewhen (state === s_writeback_resp) {
// wait for the writeback request to finish before updating the metadata
when (io.wb_req.ready) {
state := s_meta_write
}
} .elsewhen (state === s_meta_write) {
when (io.meta_write.fire) {
state := s_meta_write_resp
}
} .elsewhen (state === s_meta_write_resp) {
state := s_invalid
}
}
class BoomL1MetaReadReq(implicit p: Parameters) extends BoomBundle()(p) {
val req = Vec(lsuWidth, new L1MetaReadReq)
}
class BoomL1DataReadReq(implicit p: Parameters) extends BoomBundle()(p) {
val req = Vec(lsuWidth, new L1DataReadReq)
val valid = Vec(lsuWidth, Bool())
}
abstract class AbstractBoomDataArray(implicit p: Parameters) extends BoomModule with HasL1HellaCacheParameters {
val io = IO(new BoomBundle {
val read = Input(Vec(lsuWidth, Valid(new L1DataReadReq)))
val write = Input(Valid(new L1DataWriteReq))
val resp = Output(Vec(lsuWidth, Vec(nWays, Bits(encRowBits.W))))
val s1_nacks = Output(Vec(lsuWidth, Bool()))
})
def pipeMap[T <: Data](f: Int => T) = VecInit((0 until lsuWidth).map(f))
}
class BoomDuplicatedDataArray(implicit p: Parameters) extends AbstractBoomDataArray
{
val waddr = io.write.bits.addr >> rowOffBits
for (j <- 0 until lsuWidth) {
val raddr = io.read(j).bits.addr >> rowOffBits
for (w <- 0 until nWays) {
val array = DescribedSRAM(
name = s"array_${w}_${j}",
desc = "Non-blocking DCache Data Array",
size = nSets * refillCycles,
data = Vec(rowWords, Bits(encDataBits.W))
)
when (io.write.bits.way_en(w) && io.write.valid) {
val data = VecInit((0 until rowWords) map (i => io.write.bits.data(encDataBits*(i+1)-1,encDataBits*i)))
array.write(waddr, data, io.write.bits.wmask.asBools)
}
if (dcacheSinglePorted)
io.resp(j)(w) := RegNext(array.read(raddr, io.read(j).bits.way_en(w) && io.read(j).valid).asUInt)
else
io.resp(j)(w) := RegNext(array.read(raddr, io.read(j).valid).asUInt)
}
io.s1_nacks(j) := false.B
}
}
class BoomBankedDataArray(implicit p: Parameters) extends AbstractBoomDataArray {
val nBanks = boomParams.numDCacheBanks
val bankSize = nSets * refillCycles / nBanks
require (nBanks >= lsuWidth)
require (bankSize > 0)
val bankBits = log2Ceil(nBanks)
val bankOffBits = log2Ceil(rowWords) + log2Ceil(wordBytes)
val bidxBits = log2Ceil(bankSize)
val bidxOffBits = bankOffBits + bankBits
//----------------------------------------------------------------------------------------------------
val s0_rbanks = if (nBanks > 1) VecInit(io.read.map(r => (r.bits.addr >> bankOffBits)(bankBits-1,0))) else VecInit(0.U)
val s0_wbank = if (nBanks > 1) (io.write.bits.addr >> bankOffBits)(bankBits-1,0) else 0.U
val s0_ridxs = VecInit(io.read.map(r => (r.bits.addr >> bidxOffBits)(bidxBits-1,0)))
val s0_widx = (io.write.bits.addr >> bidxOffBits)(bidxBits-1,0)
val s0_read_valids = VecInit(io.read.map(_.valid))
val s0_bank_conflicts = pipeMap(w => {
((s0_rbanks(w) === s0_wbank) && io.write.valid && dcacheSinglePorted.B) ||
(0 until w).foldLeft(false.B)((c,i) => c || io.read(i).valid && s0_rbanks(i) === s0_rbanks(w))
})
val s0_do_bank_read = s0_read_valids zip s0_bank_conflicts map {case (v,c) => v && !c}
val s0_bank_read_gnts = Transpose(VecInit(s0_rbanks zip s0_do_bank_read map {case (b,d) => VecInit((UIntToOH(b) & Fill(nBanks,d)).asBools)}))
val s0_bank_write_gnt = (UIntToOH(s0_wbank) & Fill(nBanks, io.write.valid)).asBools
//----------------------------------------------------------------------------------------------------
val s1_rbanks = RegNext(s0_rbanks)
val s1_ridxs = RegNext(s0_ridxs)
val s1_read_valids = RegNext(s0_read_valids)
val s1_pipe_selection = pipeMap(i => VecInit(PriorityEncoderOH(pipeMap(j =>
if (j < i) s1_read_valids(j) && s1_rbanks(j) === s1_rbanks(i)
else if (j == i) true.B else false.B))))
val s1_ridx_match = pipeMap(i => pipeMap(j => if (j < i) s1_ridxs(j) === s1_ridxs(i)
else if (j == i) true.B else false.B))
val s1_nacks = pipeMap(w => s1_read_valids(w)
&& (!RegNext(s0_do_bank_read(w)) || (s1_pipe_selection(w).asUInt & ~s1_ridx_match(w).asUInt).orR)
)
val s1_bank_selection = pipeMap(w => Mux1H(s1_pipe_selection(w), s1_rbanks))
//----------------------------------------------------------------------------------------------------
val s2_bank_selection = RegNext(s1_bank_selection)
io.s1_nacks := s1_nacks
val data_arrays = Seq.tabulate(nBanks) {
b => DescribedSRAM(
name = s"array_${b}",
desc = "Boom DCache data array",
size = bankSize,
data = Vec(nWays * rowWords, Bits(encDataBits.W))
)
}
val s2_bank_reads = Reg(Vec(nBanks, Vec(nWays, Bits(encRowBits.W))))
for (b <- 0 until nBanks) {
val array = data_arrays(b)
val ridx = Mux1H(s0_bank_read_gnts(b), s0_ridxs)
val way_en = Mux1H(s0_bank_read_gnts(b), io.read.map(_.bits.way_en))
val write_en = s0_bank_write_gnt(b)
val write_mask = Cat(Seq.tabulate(nWays) { w =>
Mux(io.write.bits.way_en(w), io.write.bits.wmask, 0.U(rowWords.W))
}.reverse).asBools
val read_en = WireInit(s0_bank_read_gnts(b).reduce(_||_))
s2_bank_reads(b) := (if (dcacheSinglePorted) {
assert(!(read_en && write_en))
array.read(ridx, !write_en && read_en)
} else {
array.read(ridx, read_en)
}).asTypeOf(Vec(nWays, Bits(encRowBits.W)))
when (write_en) {
val data = Wire(Vec(nWays * rowWords, Bits(encDataBits.W)))
for (w <- 0 until nWays) {
for (i <- 0 until rowWords) {
data(w*rowWords+i) := io.write.bits.data(encDataBits*(i+1)-1,encDataBits*i)
}
}
array.write(s0_widx, data, write_mask)
}
}
for (w <- 0 until nWays) {
for (i <- 0 until lsuWidth) {
io.resp(i)(w) := s2_bank_reads(s2_bank_selection(i))(w)
}
}
}
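// Worked example of the bank indexing above (illustrative; the parameter values are
// assumed): with nBanks = 4, rowWords = 2, and wordBytes = 8, bankOffBits =
// log2Ceil(2) + log2Ceil(8) = 4 and bankBits = 2, so addr(5,4) selects the bank and
// addr(bidxOffBits+bidxBits-1, bidxOffBits) selects the row inside that bank. Two
// same-cycle reads conflict only when those bank bits match, which is what
// s0_bank_conflicts detects and s1_nacks reports back to the LSU.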
/**
* Top level class wrapping a non-blocking dcache.
*
* @param staticIdForMetadataUseOnly hart id, used only to name the cache's TileLink clients
*/
class BoomNonBlockingDCache(staticIdForMetadataUseOnly: Int)(implicit p: Parameters) extends LazyModule
{
private val tileParams = p(TileKey)
protected val cfg = tileParams.dcache.get
protected def cacheClientParameters = cfg.scratch.map(x => Seq()).getOrElse(Seq(TLMasterParameters.v1(
name = s"Core ${staticIdForMetadataUseOnly} DCache",
sourceId = IdRange(0, 1 max (cfg.nMSHRs + 1)),
supportsProbe = TransferSizes(cfg.blockBytes, cfg.blockBytes))))
protected def mmioClientParameters = Seq(TLMasterParameters.v1(
name = s"Core ${staticIdForMetadataUseOnly} DCache MMIO",
sourceId = IdRange(cfg.nMSHRs + 1, cfg.nMSHRs + 1 + cfg.nMMIOs),
requestFifo = true))
val node = TLClientNode(Seq(TLMasterPortParameters.v1(
cacheClientParameters ++ mmioClientParameters,
minLatency = 1)))
lazy val module = new BoomNonBlockingDCacheModule(this)
def flushOnFenceI = cfg.scratch.isEmpty && !node.edges.out(0).manager.managers.forall(m => !m.supportsAcquireT || !m.executable || m.regionType >= RegionType.TRACKED || m.regionType <= RegionType.IDEMPOTENT)
require(!tileParams.core.haveCFlush || cfg.scratch.isEmpty, "CFLUSH_D_L1 instruction requires a D$")
}
class BoomDCacheBundle(implicit p: Parameters, edge: TLEdgeOut) extends BoomBundle()(p) {
val errors = new DCacheErrors
val lsu = Flipped(new LSUDMemIO)
}
class BoomNonBlockingDCacheModule(outer: BoomNonBlockingDCache) extends LazyModuleImp(outer)
with HasL1HellaCacheParameters
with HasBoomCoreParameters
{
implicit val edge = outer.node.edges.out(0)
val (tl_out, _) = outer.node.out(0)
val io = IO(new BoomDCacheBundle)
io.errors := DontCare
private val fifoManagers = edge.manager.managers.filter(TLFIFOFixer.allVolatile)
fifoManagers.foreach { m =>
require (m.fifoId == fifoManagers.head.fifoId,
s"IOMSHRs must be FIFO for all regions with effects, but HellaCache sees ${m.nodePath.map(_.name)}")
}
def widthMap[T <: Data](f: Int => T) = VecInit((0 until lsuWidth).map(f))
val t_replay :: t_probe :: t_wb :: t_mshr_meta_read :: t_lsu :: t_prefetch :: Nil = Enum(6)
val wb = Module(new BoomWritebackUnit)
val prober = Module(new BoomProbeUnit)
val mshrs = Module(new BoomMSHRFile)
mshrs.io.clear_all := io.lsu.force_order
mshrs.io.brupdate := io.lsu.brupdate
mshrs.io.exception := io.lsu.exception
mshrs.io.rob_pnr_idx := io.lsu.rob_pnr_idx
mshrs.io.rob_head_idx := io.lsu.rob_head_idx
// tags
def onReset = L1Metadata(0.U, ClientMetadata.onReset)
val meta = Seq.fill(lsuWidth) { Module(new L1MetadataArray(onReset _)) }
val metaWriteArb = Module(new Arbiter(new L1MetaWriteReq, 2))
// 0 goes to MSHR refills, 1 goes to prober
val metaReadArb = Module(new Arbiter(new BoomL1MetaReadReq, 6))
// 0 goes to MSHR replays, 1 goes to prober, 2 goes to wb, 3 goes to MSHR meta read,
// 4 goes to pipeline, 5 goes to prefetcher
metaReadArb.io.in := DontCare
for (w <- 0 until lsuWidth) {
meta(w).io.write.valid := metaWriteArb.io.out.fire
meta(w).io.write.bits := metaWriteArb.io.out.bits
meta(w).io.read.valid := metaReadArb.io.out.valid
meta(w).io.read.bits := metaReadArb.io.out.bits.req(w)
}
metaReadArb.io.out.ready := meta.map(_.io.read.ready).reduce(_||_)
metaWriteArb.io.out.ready := meta.map(_.io.write.ready).reduce(_||_)
// data
val data = Module(if (boomParams.numDCacheBanks == 1) new BoomDuplicatedDataArray else new BoomBankedDataArray)
val dataWriteArb = Module(new Arbiter(new L1DataWriteReq, 2))
// 0 goes to pipeline, 1 goes to MSHR refills
val dataReadArb = Module(new Arbiter(new BoomL1DataReadReq, 3))
// 0 goes to MSHR replays, 1 goes to wb, 2 goes to pipeline
dataReadArb.io.in := DontCare
for (w <- 0 until lsuWidth) {
data.io.read(w).valid := dataReadArb.io.out.bits.valid(w) && dataReadArb.io.out.valid
data.io.read(w).bits := dataReadArb.io.out.bits.req(w)
}
dataReadArb.io.out.ready := true.B
data.io.write.valid := dataWriteArb.io.out.fire
data.io.write.bits := dataWriteArb.io.out.bits
dataWriteArb.io.out.ready := true.B
val singlePortedDCacheWrite = data.io.write.valid && dcacheSinglePorted.B
// ------------
// New requests
// In a 1-wide LSU, load/store wakeups and MSHR resps contend for the same port, so
// we should block incoming requests when the MSHR is trying to respond
val block_incoming_reqs = (lsuWidth == 1).B && mshrs.io.resp.valid
io.lsu.req.ready := metaReadArb.io.in(4).ready && dataReadArb.io.in(2).ready && !block_incoming_reqs
metaReadArb.io.in(4).valid := io.lsu.req.valid && !block_incoming_reqs
dataReadArb.io.in(2).valid := io.lsu.req.valid && !block_incoming_reqs
for (w <- 0 until lsuWidth) {
// Tag read for new requests
metaReadArb.io.in(4).bits.req(w).idx := io.lsu.req.bits(w).bits.addr >> blockOffBits
metaReadArb.io.in(4).bits.req(w).way_en := DontCare
metaReadArb.io.in(4).bits.req(w).tag := DontCare
// Data read for new requests
dataReadArb.io.in(2).bits.valid(w) := io.lsu.req.bits(w).valid
dataReadArb.io.in(2).bits.req(w).addr := io.lsu.req.bits(w).bits.addr
dataReadArb.io.in(2).bits.req(w).way_en := ~0.U(nWays.W)
}
// ------------
// MSHR Replays
val replay_req = Wire(Vec(lsuWidth, new BoomDCacheReq))
replay_req := DontCare
replay_req(0).uop := mshrs.io.replay.bits.uop
replay_req(0).addr := mshrs.io.replay.bits.addr
replay_req(0).data := mshrs.io.replay.bits.data
replay_req(0).is_hella := mshrs.io.replay.bits.is_hella
// Don't let replays get nacked due to conflict with dcache write
mshrs.io.replay.ready := metaReadArb.io.in(0).ready && dataReadArb.io.in(0).ready && !singlePortedDCacheWrite
// Tag read for MSHR replays
// We don't actually need to read the metadata; for replays we already know our way
metaReadArb.io.in(0).valid := mshrs.io.replay.valid && !singlePortedDCacheWrite
metaReadArb.io.in(0).bits.req(0).idx := mshrs.io.replay.bits.addr >> blockOffBits
metaReadArb.io.in(0).bits.req(0).way_en := DontCare
metaReadArb.io.in(0).bits.req(0).tag := DontCare
// Data read for MSHR replays
dataReadArb.io.in(0).valid := mshrs.io.replay.valid && !singlePortedDCacheWrite
dataReadArb.io.in(0).bits.req(0).addr := mshrs.io.replay.bits.addr
dataReadArb.io.in(0).bits.req(0).way_en := mshrs.io.replay.bits.way_en
dataReadArb.io.in(0).bits.valid := widthMap(w => (w == 0).B)
// -----------
// MSHR Meta read
val mshr_read_req = Wire(Vec(lsuWidth, new BoomDCacheReq))
mshr_read_req := DontCare
mshr_read_req(0).uop := NullMicroOp
mshr_read_req(0).addr := Cat(mshrs.io.meta_read.bits.tag, mshrs.io.meta_read.bits.idx) << blockOffBits
mshr_read_req(0).data := DontCare
mshr_read_req(0).is_hella := false.B
metaReadArb.io.in(3).valid := mshrs.io.meta_read.valid
metaReadArb.io.in(3).bits.req(0) := mshrs.io.meta_read.bits
mshrs.io.meta_read.ready := metaReadArb.io.in(3).ready
// -----------
// Write-backs
val wb_fire = wb.io.meta_read.fire && wb.io.data_req.fire
val wb_req = Wire(Vec(lsuWidth, new BoomDCacheReq))
wb_req := DontCare
wb_req(0).uop := NullMicroOp
wb_req(0).addr := Cat(wb.io.meta_read.bits.tag, wb.io.data_req.bits.addr)
wb_req(0).data := DontCare
wb_req(0).is_hella := false.B
// Couple the two decoupled interfaces of the WBUnit's meta_read and data_req
// Can't launch the data read if there is any possibility of a conflict with a write
// Tag read for write-back
metaReadArb.io.in(2).valid := wb.io.meta_read.valid && !singlePortedDCacheWrite
metaReadArb.io.in(2).bits.req(0) := wb.io.meta_read.bits
wb.io.meta_read.ready := metaReadArb.io.in(2).ready && dataReadArb.io.in(1).ready && !singlePortedDCacheWrite
// Data read for write-back
dataReadArb.io.in(1).valid := wb.io.data_req.valid && !singlePortedDCacheWrite
dataReadArb.io.in(1).bits.req(0) := wb.io.data_req.bits
dataReadArb.io.in(1).bits.valid := widthMap(w => (w == 0).B)
wb.io.data_req.ready := metaReadArb.io.in(2).ready && dataReadArb.io.in(1).ready && !singlePortedDCacheWrite
assert(!(wb.io.meta_read.fire ^ wb.io.data_req.fire))
// -------
// Prober
val prober_fire = prober.io.meta_read.fire
val prober_req = Wire(Vec(lsuWidth, new BoomDCacheReq))
prober_req := DontCare
prober_req(0).uop := NullMicroOp
prober_req(0).addr := Cat(prober.io.meta_read.bits.tag, prober.io.meta_read.bits.idx) << blockOffBits
prober_req(0).data := DontCare
prober_req(0).is_hella := false.B
// Tag read for prober
metaReadArb.io.in(1).valid := prober.io.meta_read.valid
metaReadArb.io.in(1).bits.req(0) := prober.io.meta_read.bits
prober.io.meta_read.ready := metaReadArb.io.in(1).ready
// Prober does not need to read data array
// -------
// Prefetcher
val prefetch_fire = mshrs.io.prefetch.fire
val prefetch_req = Wire(Vec(lsuWidth, new BoomDCacheReq))
prefetch_req := DontCare
prefetch_req(0) := mshrs.io.prefetch.bits
// Tag read for prefetch
metaReadArb.io.in(5).valid := mshrs.io.prefetch.valid
metaReadArb.io.in(5).bits.req(0).idx := mshrs.io.prefetch.bits.addr >> blockOffBits
metaReadArb.io.in(5).bits.req(0).way_en := DontCare
metaReadArb.io.in(5).bits.req(0).tag := DontCare
mshrs.io.prefetch.ready := metaReadArb.io.in(5).ready
// Prefetch does not need to read data array
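// ------------
// Pipeline summary (descriptive comment, not in the original source): s0 arbitrates
// among the six requesters above and issues the tag/data reads; s1 compares tags and
// computes nack conditions; s2 resolves hit/miss, allocates MSHRs, and sends
// responses, nacks, and store acks back to the LSU; s3-s5 hold store/AMO data so it
// can be written into the data array and bypassed to younger loads in s2.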
val s0_valid = Mux(io.lsu.req.fire, VecInit(io.lsu.req.bits.map(_.valid)),
Mux(mshrs.io.replay.fire || wb_fire || prober_fire || prefetch_fire || mshrs.io.meta_read.fire,
VecInit(1.U(lsuWidth.W).asBools), VecInit(0.U(lsuWidth.W).asBools)))
val s0_req = Mux(io.lsu.req.fire , VecInit(io.lsu.req.bits.map(_.bits)),
Mux(wb_fire , wb_req,
Mux(prober_fire , prober_req,
Mux(prefetch_fire , prefetch_req,
Mux(mshrs.io.meta_read.fire, mshr_read_req
, replay_req)))))
val s0_type = Mux(io.lsu.req.fire , t_lsu,
Mux(wb_fire , t_wb,
Mux(prober_fire , t_probe,
Mux(prefetch_fire , t_prefetch,
Mux(mshrs.io.meta_read.fire, t_mshr_meta_read
, t_replay)))))
// Does this request need to send a response or nack
val s0_send_resp_or_nack = Mux(io.lsu.req.fire, s0_valid,
VecInit(Mux(mshrs.io.replay.fire && isRead(mshrs.io.replay.bits.uop.mem_cmd), 1.U(lsuWidth.W), 0.U(lsuWidth.W)).asBools))
val s1_req = RegNext(s0_req)
for (w <- 0 until lsuWidth)
s1_req(w).uop.br_mask := GetNewBrMask(io.lsu.brupdate, s0_req(w).uop)
val s2_store_failed = Wire(Bool())
val s1_valid = widthMap(w =>
RegNext(s0_valid(w) &&
!IsKilledByBranch(io.lsu.brupdate, false.B, s0_req(w).uop) &&
!(io.lsu.exception && s0_req(w).uop.uses_ldq) &&
!(s2_store_failed && io.lsu.req.fire && s0_req(w).uop.uses_stq),
init=false.B))
for (w <- 0 until lsuWidth)
assert(!(io.lsu.s1_kill(w) && !RegNext(io.lsu.req.fire) && !RegNext(io.lsu.req.bits(w).valid)))
val s1_addr = s1_req.map(_.addr)
val s1_nack = s1_addr.map(a => a(idxMSB,idxLSB) === prober.io.meta_write.bits.idx && !prober.io.req.ready)
val s1_send_resp_or_nack = RegNext(s0_send_resp_or_nack)
val s1_type = RegNext(s0_type)
val s1_mshr_meta_read_way_en = RegNext(mshrs.io.meta_read.bits.way_en)
val s1_replay_way_en = RegNext(mshrs.io.replay.bits.way_en) // For replays, the metadata isn't written yet
val s1_wb_way_en = RegNext(wb.io.data_req.bits.way_en)
// tag check
def wayMap[T <: Data](f: Int => T) = VecInit((0 until nWays).map(f))
val s1_tag_eq_way = widthMap(i => wayMap((w: Int) => meta(i).io.resp(w).tag === (s1_addr(i) >> untagBits)).asUInt)
val s1_tag_match_way = widthMap(i =>
Mux(s1_type === t_replay, s1_replay_way_en,
Mux(s1_type === t_wb, s1_wb_way_en,
Mux(s1_type === t_mshr_meta_read, s1_mshr_meta_read_way_en,
wayMap((w: Int) => s1_tag_eq_way(i)(w) && meta(i).io.resp(w).coh.isValid()).asUInt))))
val s1_wb_idx_matches = widthMap(i => (s1_addr(i)(untagBits-1,blockOffBits) === wb.io.idx.bits) && wb.io.idx.valid)
for (w <- 0 until lsuWidth) {
io.lsu.s1_nack_advisory(w) := data.io.s1_nacks(w)
}
val s2_req = RegNext(s1_req)
val s2_type = RegNext(s1_type)
val s2_valid = widthMap(w =>
RegNext(s1_valid(w) &&
!io.lsu.s1_kill(w) &&
!IsKilledByBranch(io.lsu.brupdate, false.B, s1_req(w).uop) &&
!(io.lsu.exception && s1_req(w).uop.uses_ldq) &&
!(s2_store_failed && (s1_type === t_lsu) && s1_req(w).uop.uses_stq)))
for (w <- 0 until lsuWidth)
s2_req(w).uop.br_mask := GetNewBrMask(io.lsu.brupdate, s1_req(w).uop)
val s2_tag_match_way = RegNext(s1_tag_match_way)
val s2_tag_match = s2_tag_match_way.map(_.orR)
val s2_hit_state = widthMap(i => Mux1H(s2_tag_match_way(i), wayMap((w: Int) => RegNext(meta(i).io.resp(w).coh))))
val s2_has_permission = widthMap(w => s2_hit_state(w).onAccess(s2_req(w).uop.mem_cmd)._1)
val s2_new_hit_state = widthMap(w => s2_hit_state(w).onAccess(s2_req(w).uop.mem_cmd)._3)
val s2_hit = widthMap(w => (s2_tag_match(w) && s2_has_permission(w) && s2_hit_state(w) === s2_new_hit_state(w) && !mshrs.io.block_hit(w)) || s2_type.isOneOf(t_replay, t_wb))
val s2_nack = Wire(Vec(lsuWidth, Bool()))
assert(!(s2_type === t_replay && !s2_hit(0)), "Replays should always hit")
assert(!(s2_type === t_wb && !s2_hit(0)), "Writeback should always see data hit")
val s2_wb_idx_matches = RegNext(s1_wb_idx_matches)
// lr/sc
val debug_sc_fail_addr = RegInit(0.U)
val debug_sc_fail_cnt = RegInit(0.U(8.W))
val lrsc_count = RegInit(0.U(log2Ceil(lrscCycles).W))
val lrsc_valid = lrsc_count > lrscBackoff.U
val lrsc_addr = Reg(UInt())
val s2_lr = s2_req(0).uop.mem_cmd === M_XLR && (!RegNext(s1_nack(0)) || s2_type === t_replay)
val s2_sc = s2_req(0).uop.mem_cmd === M_XSC && (!RegNext(s1_nack(0)) || s2_type === t_replay)
val s2_lrsc_addr_match = widthMap(w => lrsc_valid && lrsc_addr === (s2_req(w).addr >> blockOffBits))
val s2_sc_fail = s2_sc && !s2_lrsc_addr_match(0)
when (lrsc_count > 0.U) { lrsc_count := lrsc_count - 1.U }
when (s2_valid(0) && ((s2_type === t_lsu && s2_hit(0) && !s2_nack(0)) ||
(s2_type === t_replay && s2_req(0).uop.mem_cmd =/= M_FLUSH_ALL))) {
when (s2_lr) {
lrsc_count := (lrscCycles - 1).U
lrsc_addr := s2_req(0).addr >> blockOffBits
}
when (lrsc_count > 0.U) {
lrsc_count := 0.U
}
}
for (w <- 0 until lsuWidth) {
when (s2_valid(w) &&
s2_type === t_lsu &&
!s2_hit(w) &&
!(s2_has_permission(w) && s2_tag_match(w)) &&
s2_lrsc_addr_match(w) &&
!s2_nack(w)) {
lrsc_count := 0.U
}
}
when (s2_valid(0)) {
when (s2_req(0).addr === debug_sc_fail_addr) {
when (s2_sc_fail) {
debug_sc_fail_cnt := debug_sc_fail_cnt + 1.U
} .elsewhen (s2_sc) {
debug_sc_fail_cnt := 0.U
}
} .otherwise {
when (s2_sc_fail) {
debug_sc_fail_addr := s2_req(0).addr
debug_sc_fail_cnt := 1.U
}
}
}
assert(debug_sc_fail_cnt < 100.U, "L1DCache failed too many SCs in a row")
val s2_data = Wire(Vec(lsuWidth, Vec(nWays, UInt(encRowBits.W))))
for (i <- 0 until lsuWidth) {
for (w <- 0 until nWays) {
s2_data(i)(w) := data.io.resp(i)(w)
}
}
val s2_data_muxed = widthMap(w => Mux1H(s2_tag_match_way(w), s2_data(w)))
val s2_word_idx = widthMap(w => if (rowWords == 1) 0.U else s2_req(w).addr(log2Up(rowWords*wordBytes)-1, log2Up(wordBytes)))
// replacement policy
val replacer = cacheParams.replacement
val s1_replaced_way_en = UIntToOH(replacer.way)
val s2_replaced_way_en = UIntToOH(RegNext(replacer.way))
val s2_repl_meta = widthMap(i => Mux1H(s2_replaced_way_en, wayMap((w: Int) => RegNext(meta(i).io.resp(w))).toSeq))
// nack because of incoming probe
val s2_nack_hit = RegNext(VecInit(s1_nack))
// Nack when we hit something currently being evicted
val s2_nack_victim = widthMap(w => s2_valid(w) && s2_hit(w) && mshrs.io.secondary_miss(w))
// MSHRs not ready for request
val s2_nack_miss = widthMap(w => s2_valid(w) && !s2_hit(w) && !mshrs.io.req(w).ready)
// Bank conflict on data arrays
val s2_nack_data = widthMap(w => s2_valid(w) && RegNext(data.io.s1_nacks(w)))
// Can't allocate MSHR for same set currently being written back
val s2_nack_wb = widthMap(w => s2_valid(w) && !s2_hit(w) && s2_wb_idx_matches(w))
s2_nack := widthMap(w => (s2_nack_miss(w) || s2_nack_hit(w) || s2_nack_victim(w) || s2_nack_data(w) || s2_nack_wb(w)) && s2_type =/= t_replay)
assert(!(s2_nack_data.reduce(_||_) && s2_type.isOneOf(t_replay, t_wb)))
val s2_send_resp = widthMap(w => (
RegNext(s1_send_resp_or_nack(w)) &&
(!(s2_nack_hit(w) || s2_nack_victim(w) || s2_nack_data(w)) || s2_type === t_replay) &&
s2_hit(w) && isRead(s2_req(w).uop.mem_cmd)
))
val s2_send_store_ack = widthMap(w => (
RegNext(s1_send_resp_or_nack(w)) && !s2_nack(w) && isWrite(s2_req(w).uop.mem_cmd) &&
(s2_hit(w) || mshrs.io.req(w).fire)))
val s2_send_nack = widthMap(w => (RegNext(s1_send_resp_or_nack(w)) && s2_nack(w)))
for (w <- 0 until lsuWidth)
assert(!(s2_send_resp(w) && s2_send_nack(w)))
// Hits always send a response
// If the MSHR is not available, the LSU has to replay this request later
// If the MSHR is available and this is only a store (not an AMO), we don't need to wait for a resp later
s2_store_failed := s2_valid(0) && s2_nack(0) && s2_send_nack(0) && s2_req(0).uop.uses_stq
// Miss handling
for (w <- 0 until lsuWidth) {
mshrs.io.req(w).valid := s2_valid(w) &&
!s2_hit(w) &&
!s2_nack_hit(w) &&
!s2_nack_victim(w) &&
!s2_nack_data(w) &&
!s2_nack_wb(w) &&
s2_type.isOneOf(t_lsu, t_prefetch) &&
!(io.lsu.exception && s2_req(w).uop.uses_ldq) &&
(isPrefetch(s2_req(w).uop.mem_cmd) ||
isRead(s2_req(w).uop.mem_cmd) ||
isWrite(s2_req(w).uop.mem_cmd))
assert(!(mshrs.io.req(w).valid && s2_type === t_replay), "Replays should not need to go back into MSHRs")
mshrs.io.req(w).bits := DontCare
mshrs.io.req(w).bits.uop := s2_req(w).uop
mshrs.io.req(w).bits.addr := s2_req(w).addr
mshrs.io.req(w).bits.tag_match := s2_tag_match(w)
mshrs.io.req(w).bits.old_meta := Mux(s2_tag_match(w), L1Metadata(s2_repl_meta(w).tag, s2_hit_state(w)), s2_repl_meta(w))
mshrs.io.req(w).bits.way_en := Mux(s2_tag_match(w), s2_tag_match_way(w), s2_replaced_way_en)
mshrs.io.req(w).bits.data := s2_req(w).data
mshrs.io.req(w).bits.is_hella := s2_req(w).is_hella
mshrs.io.req_is_probe(w) := s2_type === t_probe && s2_valid(w)
}
mshrs.io.meta_resp.valid := !s2_nack_hit(0) || prober.io.mshr_wb_rdy
mshrs.io.meta_resp.bits := Mux1H(s2_tag_match_way(0), RegNext(meta(0).io.resp))
when (mshrs.io.req.map(_.fire).reduce(_||_)) { replacer.miss }
tl_out.a <> mshrs.io.mem_acquire
// probes and releases
prober.io.req.valid := tl_out.b.valid && !lrsc_valid
tl_out.b.ready := prober.io.req.ready && !lrsc_valid
prober.io.req.bits := tl_out.b.bits
prober.io.way_en := s2_tag_match_way(0)
prober.io.block_state := s2_hit_state(0)
metaWriteArb.io.in(1) <> prober.io.meta_write
prober.io.mshr_rdy := mshrs.io.probe_rdy
prober.io.wb_rdy := (prober.io.meta_write.bits.idx =/= wb.io.idx.bits) || !wb.io.idx.valid
mshrs.io.prober_state := prober.io.state
// refills
when (tl_out.d.bits.source === cfg.nMSHRs.U) {
// This should be ReleaseAck
tl_out.d.ready := true.B
mshrs.io.mem_grant.valid := false.B
mshrs.io.mem_grant.bits := DontCare
} .otherwise {
// This should be GrantData
mshrs.io.mem_grant <> tl_out.d
}
dataWriteArb.io.in(1) <> mshrs.io.refill
metaWriteArb.io.in(0) <> mshrs.io.meta_write
tl_out.e <> mshrs.io.mem_finish
// writebacks
val wbArb = Module(new Arbiter(new WritebackReq(edge.bundle), 2))
// 0 goes to prober, 1 goes to MSHR evictions
wbArb.io.in(0) <> prober.io.wb_req
wbArb.io.in(1) <> mshrs.io.wb_req
wb.io.req <> wbArb.io.out
wb.io.data_resp := s2_data_muxed(0)
mshrs.io.wb_resp := wb.io.resp
wb.io.mem_grant := tl_out.d.fire && tl_out.d.bits.source === cfg.nMSHRs.U
val lsu_release_arb = Module(new Arbiter(new TLBundleC(edge.bundle), 2))
io.lsu.release <> lsu_release_arb.io.out
lsu_release_arb.io.in(0) <> wb.io.lsu_release
lsu_release_arb.io.in(1) <> prober.io.lsu_release
TLArbiter.lowest(edge, tl_out.c, wb.io.release, prober.io.rep)
io.lsu.perf.release := edge.done(tl_out.c)
io.lsu.perf.acquire := edge.done(tl_out.a)
// load data gen
val s2_data_word_prebypass = widthMap(w => s2_data_muxed(w) >> Cat(s2_word_idx(w), 0.U(log2Ceil(coreDataBits).W)))
val s2_data_word = Wire(Vec(lsuWidth, UInt()))
val loadgen = (0 until lsuWidth).map { w =>
new LoadGen(s2_req(w).uop.mem_size, s2_req(w).uop.mem_signed, s2_req(w).addr,
s2_data_word(w), s2_sc && (w == 0).B, wordBytes)
}
// Mux between cache responses and uncache responses
for (w <- 0 until lsuWidth) {
io.lsu.resp(w).valid := s2_valid(w) && s2_send_resp(w)
io.lsu.resp(w).bits.uop := s2_req(w).uop
io.lsu.resp(w).bits.data := loadgen(w).data | s2_sc_fail
io.lsu.resp(w).bits.is_hella := s2_req(w).is_hella
io.lsu.nack(w).valid := s2_valid(w) && s2_send_nack(w)
io.lsu.nack(w).bits := s2_req(w)
assert(!(io.lsu.nack(w).valid && s2_type =/= t_lsu))
io.lsu.store_ack(w).valid := s2_valid(w) && s2_send_store_ack(w) && (w == 0).B
io.lsu.store_ack(w).bits := s2_req(w)
}
io.lsu.ll_resp <> mshrs.io.resp
// Store/amo hits
val s3_req = Wire(new BoomDCacheReq)
s3_req := RegNext(s2_req(0))
val s3_valid = RegNext(s2_valid(0) && s2_hit(0) && isWrite(s2_req(0).uop.mem_cmd) &&
!s2_sc_fail && !(s2_send_nack(0) && s2_nack(0)))
val s3_data_word = RegNext(s2_data_word(0))
for (w <- 1 until lsuWidth) {
assert(!(s2_valid(w) && s2_hit(w) && isWrite(s2_req(w).uop.mem_cmd) &&
!s2_sc_fail && !(s2_send_nack(w) && s2_nack(w))),
"Store must go through 0th pipe in L1D")
}
// For bypassing
val s4_req = RegNext(s3_req)
val s4_valid = RegNext(s3_valid)
val s5_req = RegNext(s4_req)
val s5_valid = RegNext(s4_valid)
val s3_bypass = widthMap(w => s3_valid && ((s2_req(w).addr >> wordOffBits) === (s3_req.addr >> wordOffBits)))
val s4_bypass = widthMap(w => s4_valid && ((s2_req(w).addr >> wordOffBits) === (s4_req.addr >> wordOffBits)))
val s5_bypass = widthMap(w => s5_valid && ((s2_req(w).addr >> wordOffBits) === (s5_req.addr >> wordOffBits)))
// Store -> Load bypassing
for (w <- 0 until lsuWidth) {
s2_data_word(w) := Mux(s3_bypass(w), s3_req.data,
Mux(s4_bypass(w), s4_req.data,
Mux(s5_bypass(w), s5_req.data,
s2_data_word_prebypass(w))))
}
val amoalu = Module(new AMOALU(xLen))
amoalu.io.mask := new StoreGen(s3_req.uop.mem_size, s3_req.addr, 0.U, xLen/8).mask
amoalu.io.cmd := s3_req.uop.mem_cmd
amoalu.io.lhs := s3_data_word
amoalu.io.rhs := RegNext(s2_req(0).data)
s3_req.data := amoalu.io.out
val s3_way = RegNext(s2_tag_match_way(0))
dataWriteArb.io.in(0).valid := s3_valid
dataWriteArb.io.in(0).bits.addr := s3_req.addr
dataWriteArb.io.in(0).bits.wmask := UIntToOH(s3_req.addr.extract(rowOffBits-1,offsetlsb))
dataWriteArb.io.in(0).bits.data := Fill(rowWords, s3_req.data)
dataWriteArb.io.in(0).bits.way_en := s3_way
io.lsu.ordered := mshrs.io.fence_rdy && !s1_valid.reduce(_||_) && !s2_valid.reduce(_||_)
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
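// Usage sketch (illustrative): the first/last/done/count helpers let a client track
// progress through a multi-beat message without decoding sizes itself. For example,
// the writeback unit in dcache.scala above does
//   val (_, last_beat, all_beats_done, beat_count) = edge.count(io.release)
// to know which beat of the Release it is currently sending.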
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
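// Usage sketch (illustrative): a client mints messages through these factories rather
// than filling TLBundle fields by hand. Assuming `id`, `addr`, `want_acquire`, and a
// TLBundle `tl` are in scope:
//   val (legal, acq) = edge.AcquireBlock(fromSource = id.U, toAddress = addr,
//                                        lgSize = lgCacheBlockBytes.U,
//                                        growPermissions = TLPermissions.NtoT)
//   tl.a.valid := want_acquire && legal
//   tl.a.bits  := acq
// The dcache above builds its Release and ProbeAck beats the same way.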
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module BoomProbeUnit_1( // @[dcache.scala:145:7]
input clock, // @[dcache.scala:145:7]
input reset, // @[dcache.scala:145:7]
output io_req_ready, // @[dcache.scala:146:14]
input io_req_valid, // @[dcache.scala:146:14]
input [2:0] io_req_bits_opcode, // @[dcache.scala:146:14]
input [1:0] io_req_bits_param, // @[dcache.scala:146:14]
input [3:0] io_req_bits_size, // @[dcache.scala:146:14]
input [3:0] io_req_bits_source, // @[dcache.scala:146:14]
input [31:0] io_req_bits_address, // @[dcache.scala:146:14]
input [7:0] io_req_bits_mask, // @[dcache.scala:146:14]
input [63:0] io_req_bits_data, // @[dcache.scala:146:14]
input io_req_bits_corrupt, // @[dcache.scala:146:14]
input io_rep_ready, // @[dcache.scala:146:14]
output io_rep_valid, // @[dcache.scala:146:14]
output [2:0] io_rep_bits_param, // @[dcache.scala:146:14]
output [3:0] io_rep_bits_size, // @[dcache.scala:146:14]
output [3:0] io_rep_bits_source, // @[dcache.scala:146:14]
output [31:0] io_rep_bits_address, // @[dcache.scala:146:14]
input io_meta_read_ready, // @[dcache.scala:146:14]
output io_meta_read_valid, // @[dcache.scala:146:14]
output [3:0] io_meta_read_bits_idx, // @[dcache.scala:146:14]
output [21:0] io_meta_read_bits_tag, // @[dcache.scala:146:14]
input io_meta_write_ready, // @[dcache.scala:146:14]
output io_meta_write_valid, // @[dcache.scala:146:14]
output [3:0] io_meta_write_bits_idx, // @[dcache.scala:146:14]
output [1:0] io_meta_write_bits_way_en, // @[dcache.scala:146:14]
output [21:0] io_meta_write_bits_tag, // @[dcache.scala:146:14]
output [1:0] io_meta_write_bits_data_coh_state, // @[dcache.scala:146:14]
output [21:0] io_meta_write_bits_data_tag, // @[dcache.scala:146:14]
input io_wb_req_ready, // @[dcache.scala:146:14]
output io_wb_req_valid, // @[dcache.scala:146:14]
output [21:0] io_wb_req_bits_tag, // @[dcache.scala:146:14]
output [3:0] io_wb_req_bits_idx, // @[dcache.scala:146:14]
output [3:0] io_wb_req_bits_source, // @[dcache.scala:146:14]
output [2:0] io_wb_req_bits_param, // @[dcache.scala:146:14]
output [1:0] io_wb_req_bits_way_en, // @[dcache.scala:146:14]
input [1:0] io_way_en, // @[dcache.scala:146:14]
input io_wb_rdy, // @[dcache.scala:146:14]
input io_mshr_rdy, // @[dcache.scala:146:14]
output io_mshr_wb_rdy, // @[dcache.scala:146:14]
input [1:0] io_block_state_state, // @[dcache.scala:146:14]
input io_lsu_release_ready, // @[dcache.scala:146:14]
output io_lsu_release_valid, // @[dcache.scala:146:14]
output [2:0] io_lsu_release_bits_param, // @[dcache.scala:146:14]
output [3:0] io_lsu_release_bits_size, // @[dcache.scala:146:14]
output [3:0] io_lsu_release_bits_source, // @[dcache.scala:146:14]
output [31:0] io_lsu_release_bits_address, // @[dcache.scala:146:14]
output io_state_valid, // @[dcache.scala:146:14]
output [33:0] io_state_bits // @[dcache.scala:146:14]
);
wire io_req_valid_0 = io_req_valid; // @[dcache.scala:145:7]
wire [2:0] io_req_bits_opcode_0 = io_req_bits_opcode; // @[dcache.scala:145:7]
wire [1:0] io_req_bits_param_0 = io_req_bits_param; // @[dcache.scala:145:7]
wire [3:0] io_req_bits_size_0 = io_req_bits_size; // @[dcache.scala:145:7]
wire [3:0] io_req_bits_source_0 = io_req_bits_source; // @[dcache.scala:145:7]
wire [31:0] io_req_bits_address_0 = io_req_bits_address; // @[dcache.scala:145:7]
wire [7:0] io_req_bits_mask_0 = io_req_bits_mask; // @[dcache.scala:145:7]
wire [63:0] io_req_bits_data_0 = io_req_bits_data; // @[dcache.scala:145:7]
wire io_req_bits_corrupt_0 = io_req_bits_corrupt; // @[dcache.scala:145:7]
wire io_rep_ready_0 = io_rep_ready; // @[dcache.scala:145:7]
wire io_meta_read_ready_0 = io_meta_read_ready; // @[dcache.scala:145:7]
wire io_meta_write_ready_0 = io_meta_write_ready; // @[dcache.scala:145:7]
wire io_wb_req_ready_0 = io_wb_req_ready; // @[dcache.scala:145:7]
wire [1:0] io_way_en_0 = io_way_en; // @[dcache.scala:145:7]
wire io_wb_rdy_0 = io_wb_rdy; // @[dcache.scala:145:7]
wire io_mshr_rdy_0 = io_mshr_rdy; // @[dcache.scala:145:7]
wire [1:0] io_block_state_state_0 = io_block_state_state; // @[dcache.scala:145:7]
wire io_lsu_release_ready_0 = io_lsu_release_ready; // @[dcache.scala:145:7]
wire [3:0] _r_T_1 = 4'h3; // @[Metadata.scala:122:10]
wire [3:0] _r_T_2 = 4'h2; // @[Metadata.scala:123:10]
wire [3:0] _r_T_3 = 4'h1; // @[Metadata.scala:124:10]
wire [3:0] _r_T_4 = 4'h0; // @[Metadata.scala:125:10]
wire [3:0] _r_T_5 = 4'h7; // @[Metadata.scala:126:10]
wire [3:0] _r_T_6 = 4'h6; // @[Metadata.scala:127:10]
wire [3:0] _r_T_7 = 4'h5; // @[Metadata.scala:128:10]
wire [3:0] _r_T_8 = 4'h4; // @[Metadata.scala:129:10]
wire [3:0] _r_T_9 = 4'hB; // @[Metadata.scala:130:10]
wire [3:0] _r_T_10 = 4'hA; // @[Metadata.scala:131:10]
wire [3:0] _r_T_11 = 4'h9; // @[Metadata.scala:132:10]
wire [3:0] _r_T_12 = 4'h8; // @[Metadata.scala:133:10]
wire [1:0] miss_coh_state = 2'h0; // @[Metadata.scala:160:20]
wire [1:0] _r_T_16 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _r_T_20 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _r_T_24 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _r_T_28 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _r_T_32 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] io_meta_read_bits_way_en = 2'h3; // @[dcache.scala:145:7]
wire [1:0] _io_meta_read_bits_way_en_T = 2'h3; // @[dcache.scala:191:31]
wire io_rep_bits_corrupt = 1'h0; // @[dcache.scala:145:7]
wire io_wb_req_bits_voluntary = 1'h0; // @[dcache.scala:145:7]
wire io_lsu_release_bits_corrupt = 1'h0; // @[dcache.scala:145:7]
wire _r_T_14 = 1'h0; // @[Misc.scala:38:9]
wire _r_T_18 = 1'h0; // @[Misc.scala:38:9]
wire _r_T_22 = 1'h0; // @[Misc.scala:38:9]
wire io_rep_bits_c_corrupt = 1'h0; // @[Edges.scala:416:17]
wire opdata = 1'h0; // @[Edges.scala:102:36]
wire io_lsu_release_bits_c_corrupt = 1'h0; // @[Edges.scala:416:17]
wire [63:0] io_rep_bits_data = 64'h0; // @[dcache.scala:145:7]
wire [63:0] io_lsu_release_bits_data = 64'h0; // @[dcache.scala:145:7]
wire [63:0] io_rep_bits_c_data = 64'h0; // @[Edges.scala:416:17]
wire [63:0] io_lsu_release_bits_c_data = 64'h0; // @[Edges.scala:416:17]
wire [2:0] io_rep_bits_opcode = 3'h4; // @[dcache.scala:145:7]
wire [2:0] io_lsu_release_bits_opcode = 3'h4; // @[dcache.scala:145:7]
wire _io_req_ready_T; // @[dcache.scala:181:25]
wire [2:0] io_rep_bits_c_opcode = 3'h4; // @[Edges.scala:416:17]
wire [2:0] io_lsu_release_bits_c_opcode = 3'h4; // @[Edges.scala:416:17]
wire _io_rep_valid_T; // @[dcache.scala:182:25]
wire [2:0] io_rep_bits_c_param; // @[Edges.scala:416:17]
wire [3:0] io_rep_bits_c_size; // @[Edges.scala:416:17]
wire [3:0] io_rep_bits_c_source; // @[Edges.scala:416:17]
wire [31:0] io_rep_bits_c_address; // @[Edges.scala:416:17]
wire _io_meta_read_valid_T; // @[dcache.scala:188:31]
wire [3:0] req_idx; // @[dcache.scala:168:28]
wire [21:0] req_tag; // @[dcache.scala:169:29]
wire _io_meta_write_valid_T; // @[dcache.scala:193:32]
wire [1:0] new_coh_state; // @[Metadata.scala:160:20]
wire _io_wb_req_valid_T; // @[dcache.scala:200:28]
wire [2:0] report_param; // @[Misc.scala:38:36]
wire _io_mshr_wb_rdy_T_9; // @[dcache.scala:209:21]
wire _io_lsu_release_valid_T; // @[dcache.scala:211:33]
wire [2:0] io_lsu_release_bits_c_param; // @[Edges.scala:416:17]
wire [3:0] io_lsu_release_bits_c_size; // @[Edges.scala:416:17]
wire [3:0] io_lsu_release_bits_c_source; // @[Edges.scala:416:17]
wire [31:0] io_lsu_release_bits_c_address; // @[Edges.scala:416:17]
wire _io_state_valid_T; // @[dcache.scala:178:27]
wire io_req_ready_0; // @[dcache.scala:145:7]
wire [2:0] io_rep_bits_param_0; // @[dcache.scala:145:7]
wire [3:0] io_rep_bits_size_0; // @[dcache.scala:145:7]
wire [3:0] io_rep_bits_source_0; // @[dcache.scala:145:7]
wire [31:0] io_rep_bits_address_0; // @[dcache.scala:145:7]
wire io_rep_valid_0; // @[dcache.scala:145:7]
wire [3:0] io_meta_read_bits_idx_0; // @[dcache.scala:145:7]
wire [21:0] io_meta_read_bits_tag_0; // @[dcache.scala:145:7]
wire io_meta_read_valid_0; // @[dcache.scala:145:7]
wire [1:0] io_meta_write_bits_data_coh_state_0; // @[dcache.scala:145:7]
wire [21:0] io_meta_write_bits_data_tag_0; // @[dcache.scala:145:7]
wire [3:0] io_meta_write_bits_idx_0; // @[dcache.scala:145:7]
wire [1:0] io_meta_write_bits_way_en_0; // @[dcache.scala:145:7]
wire [21:0] io_meta_write_bits_tag_0; // @[dcache.scala:145:7]
wire io_meta_write_valid_0; // @[dcache.scala:145:7]
wire [21:0] io_wb_req_bits_tag_0; // @[dcache.scala:145:7]
wire [3:0] io_wb_req_bits_idx_0; // @[dcache.scala:145:7]
wire [3:0] io_wb_req_bits_source_0; // @[dcache.scala:145:7]
wire [2:0] io_wb_req_bits_param_0; // @[dcache.scala:145:7]
wire [1:0] io_wb_req_bits_way_en_0; // @[dcache.scala:145:7]
wire io_wb_req_valid_0; // @[dcache.scala:145:7]
wire [2:0] io_lsu_release_bits_param_0; // @[dcache.scala:145:7]
wire [3:0] io_lsu_release_bits_size_0; // @[dcache.scala:145:7]
wire [3:0] io_lsu_release_bits_source_0; // @[dcache.scala:145:7]
wire [31:0] io_lsu_release_bits_address_0; // @[dcache.scala:145:7]
wire io_lsu_release_valid_0; // @[dcache.scala:145:7]
wire io_state_valid_0; // @[dcache.scala:145:7]
wire [33:0] io_state_bits_0; // @[dcache.scala:145:7]
wire io_mshr_wb_rdy_0; // @[dcache.scala:145:7]
reg [3:0] state; // @[dcache.scala:165:22]
reg [2:0] req_opcode; // @[dcache.scala:167:16]
reg [1:0] req_param; // @[dcache.scala:167:16]
reg [3:0] req_size; // @[dcache.scala:167:16]
assign io_rep_bits_c_size = req_size; // @[Edges.scala:416:17]
assign io_lsu_release_bits_c_size = req_size; // @[Edges.scala:416:17]
reg [3:0] req_source; // @[dcache.scala:167:16]
assign io_wb_req_bits_source_0 = req_source; // @[dcache.scala:145:7, :167:16]
assign io_rep_bits_c_source = req_source; // @[Edges.scala:416:17]
assign io_lsu_release_bits_c_source = req_source; // @[Edges.scala:416:17]
reg [31:0] req_address; // @[dcache.scala:167:16]
assign io_rep_bits_c_address = req_address; // @[Edges.scala:416:17]
assign io_lsu_release_bits_c_address = req_address; // @[Edges.scala:416:17]
reg [7:0] req_mask; // @[dcache.scala:167:16]
reg [63:0] req_data; // @[dcache.scala:167:16]
reg req_corrupt; // @[dcache.scala:167:16]
assign req_idx = req_address[9:6]; // @[dcache.scala:167:16, :168:28]
assign io_meta_read_bits_idx_0 = req_idx; // @[dcache.scala:145:7, :168:28]
assign io_meta_write_bits_idx_0 = req_idx; // @[dcache.scala:145:7, :168:28]
assign io_wb_req_bits_idx_0 = req_idx; // @[dcache.scala:145:7, :168:28]
assign req_tag = req_address[31:10]; // @[dcache.scala:167:16, :169:29]
assign io_meta_read_bits_tag_0 = req_tag; // @[dcache.scala:145:7, :169:29]
assign io_meta_write_bits_tag_0 = req_tag; // @[dcache.scala:145:7, :169:29]
assign io_meta_write_bits_data_tag_0 = req_tag; // @[dcache.scala:145:7, :169:29]
assign io_wb_req_bits_tag_0 = req_tag; // @[dcache.scala:145:7, :169:29]
reg [1:0] way_en; // @[dcache.scala:171:19]
assign io_meta_write_bits_way_en_0 = way_en; // @[dcache.scala:145:7, :171:19]
assign io_wb_req_bits_way_en_0 = way_en; // @[dcache.scala:145:7, :171:19]
wire tag_matches = |way_en; // @[dcache.scala:171:19, :172:28]
reg [1:0] old_coh_state; // @[dcache.scala:173:20]
wire [1:0] reply_coh_state = tag_matches ? old_coh_state : 2'h0; // @[dcache.scala:172:28, :173:20, :175:22]
wire [3:0] _r_T = {req_param, reply_coh_state}; // @[Metadata.scala:120:19]
wire _r_T_13 = _r_T == 4'h8; // @[Misc.scala:56:20]
wire [2:0] _r_T_15 = _r_T_13 ? 3'h5 : 3'h0; // @[Misc.scala:38:36, :56:20]
wire _r_T_17 = _r_T == 4'h9; // @[Misc.scala:56:20]
wire [2:0] _r_T_19 = _r_T_17 ? 3'h2 : _r_T_15; // @[Misc.scala:38:36, :56:20]
wire _r_T_21 = _r_T == 4'hA; // @[Misc.scala:56:20]
wire [2:0] _r_T_23 = _r_T_21 ? 3'h1 : _r_T_19; // @[Misc.scala:38:36, :56:20]
wire _r_T_25 = _r_T == 4'hB; // @[Misc.scala:56:20]
wire _r_T_26 = _r_T_25; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_27 = _r_T_25 ? 3'h1 : _r_T_23; // @[Misc.scala:38:36, :56:20]
wire _r_T_29 = _r_T == 4'h4; // @[Misc.scala:56:20]
wire _r_T_30 = ~_r_T_29 & _r_T_26; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_31 = _r_T_29 ? 3'h5 : _r_T_27; // @[Misc.scala:38:36, :56:20]
wire _r_T_33 = _r_T == 4'h5; // @[Misc.scala:56:20]
wire _r_T_34 = ~_r_T_33 & _r_T_30; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_35 = _r_T_33 ? 3'h4 : _r_T_31; // @[Misc.scala:38:36, :56:20]
wire [1:0] _r_T_36 = {1'h0, _r_T_33}; // @[Misc.scala:38:63, :56:20]
wire _r_T_37 = _r_T == 4'h6; // @[Misc.scala:56:20]
wire _r_T_38 = ~_r_T_37 & _r_T_34; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_39 = _r_T_37 ? 3'h0 : _r_T_35; // @[Misc.scala:38:36, :56:20]
wire [1:0] _r_T_40 = _r_T_37 ? 2'h1 : _r_T_36; // @[Misc.scala:38:63, :56:20]
wire _r_T_41 = _r_T == 4'h7; // @[Misc.scala:56:20]
wire _r_T_42 = _r_T_41 | _r_T_38; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_43 = _r_T_41 ? 3'h0 : _r_T_39; // @[Misc.scala:38:36, :56:20]
wire [1:0] _r_T_44 = _r_T_41 ? 2'h1 : _r_T_40; // @[Misc.scala:38:63, :56:20]
wire _r_T_45 = _r_T == 4'h0; // @[Misc.scala:56:20]
wire _r_T_46 = ~_r_T_45 & _r_T_42; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_47 = _r_T_45 ? 3'h5 : _r_T_43; // @[Misc.scala:38:36, :56:20]
wire [1:0] _r_T_48 = _r_T_45 ? 2'h0 : _r_T_44; // @[Misc.scala:38:63, :56:20]
wire _r_T_49 = _r_T == 4'h1; // @[Misc.scala:56:20]
wire _r_T_50 = ~_r_T_49 & _r_T_46; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_51 = _r_T_49 ? 3'h4 : _r_T_47; // @[Misc.scala:38:36, :56:20]
wire [1:0] _r_T_52 = _r_T_49 ? 2'h1 : _r_T_48; // @[Misc.scala:38:63, :56:20]
wire _r_T_53 = _r_T == 4'h2; // @[Misc.scala:56:20]
wire _r_T_54 = ~_r_T_53 & _r_T_50; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_55 = _r_T_53 ? 3'h3 : _r_T_51; // @[Misc.scala:38:36, :56:20]
wire [1:0] _r_T_56 = _r_T_53 ? 2'h2 : _r_T_52; // @[Misc.scala:38:63, :56:20]
wire _r_T_57 = _r_T == 4'h3; // @[Misc.scala:56:20]
wire is_dirty = _r_T_57 | _r_T_54; // @[Misc.scala:38:9, :56:20]
assign report_param = _r_T_57 ? 3'h3 : _r_T_55; // @[Misc.scala:38:36, :56:20]
assign io_wb_req_bits_param_0 = report_param; // @[Misc.scala:38:36]
assign io_rep_bits_c_param = report_param; // @[Misc.scala:38:36]
assign io_lsu_release_bits_c_param = report_param; // @[Misc.scala:38:36]
wire [1:0] r_3 = _r_T_57 ? 2'h2 : _r_T_56; // @[Misc.scala:38:63, :56:20]
assign new_coh_state = r_3; // @[Misc.scala:38:63]
assign io_meta_write_bits_data_coh_state_0 = new_coh_state; // @[Metadata.scala:160:20]
assign _io_state_valid_T = |state; // @[dcache.scala:165:22, :178:27]
assign io_state_valid_0 = _io_state_valid_T; // @[dcache.scala:145:7, :178:27]
assign io_state_bits_0 = {2'h0, req_address}; // @[dcache.scala:145:7, :167:16, :179:18]
assign _io_req_ready_T = ~(|state); // @[dcache.scala:165:22, :178:27, :181:25]
assign io_req_ready_0 = _io_req_ready_T; // @[dcache.scala:145:7, :181:25]
wire _T_15 = state == 4'h6; // @[dcache.scala:165:22, :182:25]
assign _io_rep_valid_T = _T_15; // @[dcache.scala:182:25]
wire _io_mshr_wb_rdy_T; // @[package.scala:16:47]
assign _io_mshr_wb_rdy_T = _T_15; // @[package.scala:16:47]
assign io_rep_valid_0 = _io_rep_valid_T; // @[dcache.scala:145:7, :182:25]
assign io_rep_bits_param_0 = io_rep_bits_c_param; // @[Edges.scala:416:17]
assign io_rep_bits_size_0 = io_rep_bits_c_size; // @[Edges.scala:416:17]
assign io_rep_bits_source_0 = io_rep_bits_c_source; // @[Edges.scala:416:17]
assign io_rep_bits_address_0 = io_rep_bits_c_address; // @[Edges.scala:416:17]
assign _io_meta_read_valid_T = state == 4'h1; // @[dcache.scala:165:22, :188:31]
assign io_meta_read_valid_0 = _io_meta_read_valid_T; // @[dcache.scala:145:7, :188:31]
wire _T_19 = state == 4'h9; // @[dcache.scala:165:22, :193:32]
assign _io_meta_write_valid_T = _T_19; // @[dcache.scala:193:32]
wire _io_mshr_wb_rdy_T_3; // @[package.scala:16:47]
assign _io_mshr_wb_rdy_T_3 = _T_19; // @[package.scala:16:47]
assign io_meta_write_valid_0 = _io_meta_write_valid_T; // @[dcache.scala:145:7, :193:32]
wire _T_16 = state == 4'h7; // @[dcache.scala:165:22, :200:28]
assign _io_wb_req_valid_T = _T_16; // @[dcache.scala:200:28]
wire _io_mshr_wb_rdy_T_1; // @[package.scala:16:47]
assign _io_mshr_wb_rdy_T_1 = _T_16; // @[package.scala:16:47]
assign io_wb_req_valid_0 = _io_wb_req_valid_T; // @[dcache.scala:145:7, :200:28]
wire _io_mshr_wb_rdy_T_2 = state == 4'h8; // @[package.scala:16:47]
wire _io_mshr_wb_rdy_T_4 = state == 4'hA; // @[package.scala:16:47]
wire _io_mshr_wb_rdy_T_5 = _io_mshr_wb_rdy_T | _io_mshr_wb_rdy_T_1; // @[package.scala:16:47, :81:59]
wire _io_mshr_wb_rdy_T_6 = _io_mshr_wb_rdy_T_5 | _io_mshr_wb_rdy_T_2; // @[package.scala:16:47, :81:59]
wire _io_mshr_wb_rdy_T_7 = _io_mshr_wb_rdy_T_6 | _io_mshr_wb_rdy_T_3; // @[package.scala:16:47, :81:59]
wire _io_mshr_wb_rdy_T_8 = _io_mshr_wb_rdy_T_7 | _io_mshr_wb_rdy_T_4; // @[package.scala:16:47, :81:59]
assign _io_mshr_wb_rdy_T_9 = ~_io_mshr_wb_rdy_T_8; // @[package.scala:81:59]
assign io_mshr_wb_rdy_0 = _io_mshr_wb_rdy_T_9; // @[dcache.scala:145:7, :209:21]
assign _io_lsu_release_valid_T = state == 4'h5; // @[dcache.scala:165:22, :211:33]
assign io_lsu_release_valid_0 = _io_lsu_release_valid_T; // @[dcache.scala:145:7, :211:33]
assign io_lsu_release_bits_param_0 = io_lsu_release_bits_c_param; // @[Edges.scala:416:17]
assign io_lsu_release_bits_size_0 = io_lsu_release_bits_c_size; // @[Edges.scala:416:17]
assign io_lsu_release_bits_source_0 = io_lsu_release_bits_c_source; // @[Edges.scala:416:17]
assign io_lsu_release_bits_address_0 = io_lsu_release_bits_c_address; // @[Edges.scala:416:17]
wire _state_T = io_mshr_rdy_0 & io_wb_rdy_0; // @[dcache.scala:145:7, :231:30]
wire [3:0] _state_T_1 = _state_T ? 4'h4 : 4'h1; // @[dcache.scala:231:{17,30}]
wire _state_T_2 = tag_matches & is_dirty; // @[Misc.scala:38:9]
wire [3:0] _state_T_3 = {2'h1, _state_T_2, 1'h1}; // @[dcache.scala:233:{17,30}]
wire [3:0] _state_T_4 = tag_matches ? 4'h9 : 4'h0; // @[dcache.scala:172:28, :240:19]
wire [15:0][3:0] _GEN = {{state}, {state}, {state}, {state}, {state}, {4'h0}, {io_meta_write_ready_0 & io_meta_write_valid_0 ? 4'hA : state}, {io_wb_req_ready_0 ? 4'h9 : state}, {io_wb_req_ready_0 & io_wb_req_valid_0 ? 4'h8 : state}, {io_rep_ready_0 ? _state_T_4 : state}, {io_lsu_release_ready_0 & io_lsu_release_valid_0 ? 4'h6 : state}, {_state_T_3}, {_state_T_1}, {4'h3}, {io_meta_read_ready_0 & io_meta_read_valid_0 ? 4'h2 : state}, {state}}; // @[Decoupled.scala:51:35]
wire _T_7 = io_req_ready_0 & io_req_valid_0; // @[Decoupled.scala:51:35]
always @(posedge clock) begin // @[dcache.scala:145:7]
if (reset) // @[dcache.scala:145:7]
state <= 4'h0; // @[dcache.scala:165:22]
else if (|state) // @[dcache.scala:165:22, :178:27]
state <= _GEN[state]; // @[package.scala:16:47]
else if (_T_7) // @[Decoupled.scala:51:35]
state <= 4'h1; // @[dcache.scala:165:22]
if (~(|state) & _T_7) begin // @[Decoupled.scala:51:35]
req_opcode <= io_req_bits_opcode_0; // @[dcache.scala:145:7, :167:16]
req_param <= io_req_bits_param_0; // @[dcache.scala:145:7, :167:16]
req_size <= io_req_bits_size_0; // @[dcache.scala:145:7, :167:16]
req_source <= io_req_bits_source_0; // @[dcache.scala:145:7, :167:16]
req_address <= io_req_bits_address_0; // @[dcache.scala:145:7, :167:16]
req_mask <= io_req_bits_mask_0; // @[dcache.scala:145:7, :167:16]
req_data <= io_req_bits_data_0; // @[dcache.scala:145:7, :167:16]
req_corrupt <= io_req_bits_corrupt_0; // @[dcache.scala:145:7, :167:16]
end
if (~(|state) | _io_meta_read_valid_T | state == 4'h2 | state != 4'h3) begin // @[dcache.scala:165:22, :173:20, :178:27, :181:25, :188:31, :215:30, :220:39, :224:{22,39}, :227:{22,38}]
end
else begin // @[dcache.scala:173:20, :215:30, :220:39, :224:39, :227:38]
way_en <= io_way_en_0; // @[dcache.scala:145:7, :171:19]
old_coh_state <= io_block_state_state_0; // @[dcache.scala:145:7, :173:20]
end
end // always @(posedge)
assign io_req_ready = io_req_ready_0; // @[dcache.scala:145:7]
assign io_rep_valid = io_rep_valid_0; // @[dcache.scala:145:7]
assign io_rep_bits_param = io_rep_bits_param_0; // @[dcache.scala:145:7]
assign io_rep_bits_size = io_rep_bits_size_0; // @[dcache.scala:145:7]
assign io_rep_bits_source = io_rep_bits_source_0; // @[dcache.scala:145:7]
assign io_rep_bits_address = io_rep_bits_address_0; // @[dcache.scala:145:7]
assign io_meta_read_valid = io_meta_read_valid_0; // @[dcache.scala:145:7]
assign io_meta_read_bits_idx = io_meta_read_bits_idx_0; // @[dcache.scala:145:7]
assign io_meta_read_bits_tag = io_meta_read_bits_tag_0; // @[dcache.scala:145:7]
assign io_meta_write_valid = io_meta_write_valid_0; // @[dcache.scala:145:7]
assign io_meta_write_bits_idx = io_meta_write_bits_idx_0; // @[dcache.scala:145:7]
assign io_meta_write_bits_way_en = io_meta_write_bits_way_en_0; // @[dcache.scala:145:7]
assign io_meta_write_bits_tag = io_meta_write_bits_tag_0; // @[dcache.scala:145:7]
assign io_meta_write_bits_data_coh_state = io_meta_write_bits_data_coh_state_0; // @[dcache.scala:145:7]
assign io_meta_write_bits_data_tag = io_meta_write_bits_data_tag_0; // @[dcache.scala:145:7]
assign io_wb_req_valid = io_wb_req_valid_0; // @[dcache.scala:145:7]
assign io_wb_req_bits_tag = io_wb_req_bits_tag_0; // @[dcache.scala:145:7]
assign io_wb_req_bits_idx = io_wb_req_bits_idx_0; // @[dcache.scala:145:7]
assign io_wb_req_bits_source = io_wb_req_bits_source_0; // @[dcache.scala:145:7]
assign io_wb_req_bits_param = io_wb_req_bits_param_0; // @[dcache.scala:145:7]
assign io_wb_req_bits_way_en = io_wb_req_bits_way_en_0; // @[dcache.scala:145:7]
assign io_mshr_wb_rdy = io_mshr_wb_rdy_0; // @[dcache.scala:145:7]
assign io_lsu_release_valid = io_lsu_release_valid_0; // @[dcache.scala:145:7]
assign io_lsu_release_bits_param = io_lsu_release_bits_param_0; // @[dcache.scala:145:7]
assign io_lsu_release_bits_size = io_lsu_release_bits_size_0; // @[dcache.scala:145:7]
assign io_lsu_release_bits_source = io_lsu_release_bits_source_0; // @[dcache.scala:145:7]
assign io_lsu_release_bits_address = io_lsu_release_bits_address_0; // @[dcache.scala:145:7]
assign io_state_valid = io_state_valid_0; // @[dcache.scala:145:7]
assign io_state_bits = io_state_bits_0; // @[dcache.scala:145:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
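// Illustrative usage of DecoupledHelper (src/dst are hypothetical Decoupled endpoints),
// showing the exclude-yourself pattern the helper is built for:
//   val xfer = DecoupledHelper(src.valid, dst.ready)
//   dst.valid := xfer.fire(dst.ready)   // AND of all conditions except dst.ready itself
//   src.ready := xfer.fire(src.valid)   // AND of all conditions except src.valid itself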
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
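// Illustrative call matching the second example in the comment above: with beatBytes = 4,
//   val wmask = MaskGen(0x3.U, 1.U, 4)   // => "b0011"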
File Logger.scala:
// See LICENSE for license details
package roccaccutils.logger
import chisel3._
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.diplomacy.{ValName}
trait Logger {
// --------------------------
// MUST BE DEFINED BY CHILD
// --------------------------
def logInfoImplPrintWrapper(printf: chisel3.printf.Printf)(implicit p: Parameters = Parameters.empty): chisel3.printf.Printf
def logCriticalImplPrintWrapper(printf: chisel3.printf.Printf)(implicit p: Parameters = Parameters.empty): chisel3.printf.Printf
// --------------------------
def trimValName()(implicit valName: ValName): String = {
// TODO: For now don't trim since it can have different pre/post-fixes
//val trimAmt = if (valName.value.startsWith("<local")) 6 else 1
//println(s"Got this: ${valName.value}")
//"<" + valName.value.substring(trimAmt, valName.value.length)
valName.value
}
def createPrefix(typ: String)(implicit valName: ValName, withMod: Boolean, prefix: String): String = {
val s = Seq(s"${typ}", "%d") ++ (if (withMod) Seq(trimValName()) else Seq.empty) ++ (if (prefix != "") Seq(prefix) else Seq.empty)
":" + s.mkString(":") + ": "
}
def createFmtAndArgs(typ: String, format: String, args: Bits*)(implicit valName: ValName, withMod: Boolean, prefix: String): (String, Seq[Bits]) = {
val loginfo_cycles = RegInit(0.U(64.W))
loginfo_cycles := loginfo_cycles + 1.U
val allargs = Seq(loginfo_cycles) ++ args
val allfmt = createPrefix(typ) + format
(allfmt, allargs)
}
def logInfoImpl(format: String, args: Bits*)(implicit p: Parameters = Parameters.empty, valName: ValName, withMod: Boolean, prefix: String): Unit = {
val (allfmt, allargs) = createFmtAndArgs("INFO", format, args:_*)
logInfoImplPrintWrapper(printf(Printable.pack(allfmt, allargs:_*)))
}
def logCriticalImpl(format: String, args: Bits*)(implicit p: Parameters = Parameters.empty, valName: ValName, withMod: Boolean, prefix: String): Unit = {
val (allfmt, allargs) = createFmtAndArgs("CRIT", format, args:_*)
logCriticalImplPrintWrapper(printf(Printable.pack(allfmt, allargs:_*)))
}
// ---- USE THE FUNCTIONS BELOW ----
def logInfo(format: String, args: Bits*)(implicit p: Parameters = Parameters.empty, valName: ValName = ValName("<UnknownMod>"), prefix: String = ""): Unit = {
implicit val withMod = true
logInfoImpl(format, args:_*)
}
def logCritical(format: String, args: Bits*)(implicit p: Parameters = Parameters.empty, valName: ValName = ValName("<UnknownMod>"), prefix: String = ""): Unit = {
implicit val withMod = true
logCriticalImpl(format, args:_*)
}
def logInfoNoMod(format: String, args: Bits*)(implicit p: Parameters = Parameters.empty, valName: ValName = ValName("<UnknownMod>"), prefix: String = ""): Unit = {
implicit val withMod = false
logInfoImpl(format, args:_*)
}
def logCriticalNoMod(format: String, args: Bits*)(implicit p: Parameters = Parameters.empty, valName: ValName = ValName("<UnknownMod>"), prefix: String = ""): Unit = {
implicit val withMod = false
logCriticalImpl(format, args:_*)
}
}
// An example of a custom logger (that optionally only synthesizes critical messages):
//
// object MyLogger extends Logger {
// // just print info msgs
// def logInfoImplPrintWrapper(printf: chisel3.printf.Printf)(implicit p: Parameters = Parameters.empty): chisel3.printf.Printf = {
// printf
// }
//
// // optionally synthesize critical msgs
// def logCriticalImplPrintWrapper(printf: chisel3.printf.Printf)(implicit p: Parameters = Parameters.empty): chisel3.printf.Printf = {
// if (p(EnablePrintfSynthesis)) {
// SynthesizePrintf(printf) // function comes from midas.targetutils
// } else {
// printf
// }
// }
// }
object DefaultLogger extends Logger {
// just print info msgs
def logInfoImplPrintWrapper(printf: chisel3.printf.Printf)(implicit p: Parameters = Parameters.empty): chisel3.printf.Printf = {
printf
}
// just print critical msgs
def logCriticalImplPrintWrapper(printf: chisel3.printf.Printf)(implicit p: Parameters = Parameters.empty): chisel3.printf.Printf = {
printf
}
}
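// Illustrative use of the trait above (the signal name is hypothetical): inside a Module,
//   DefaultLogger.logInfo("consumed %d bytes\n", nbytes)
// emits a printf of the form ":INFO:<cycle>:<valName>: consumed <n> bytes", where <cycle>
// is the per-call-site cycle counter from createFmtAndArgs and <valName> defaults to
// "<UnknownMod>" unless an implicit ValName is in scope.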
File MemStreamer.scala:
// See LICENSE for license details
package roccaccutils
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.util.{DecoupledHelper}
import freechips.rocketchip.diplomacy.{ValName}
import roccaccutils.logger._
class MemStreamerBundle(implicit hp: L2MemHelperParams) extends Bundle {
val mem_stream = Flipped(new MemLoaderConsumerBundle) //from MemLoader
val memwrites_in = Decoupled(new WriterBundle) //to MemWriter
}
trait MemStreamer
extends Module
with HasL2MemHelperParams {
// --------------------------
// MUST BE DEFINED BY CHILD
// --------------------------
val io: MemStreamerBundle
implicit val p: Parameters
implicit val valName: ValName = ValName("<MemStreamer>")
val logger: Logger
// --------------------------
// 1. Receive data from the memloader into load_data_queue
/* Slice data by the L2 bandwidth (assuming 32 bytes).
** A different L2 bandwidth will require changing
** the memwriter module in Top.scala
** and the LiteralChunk bundle in Common.scala. */
val load_data_queue = Module(new Queue(new LiteralChunk, 5))
dontTouch(load_data_queue.io.count)
load_data_queue.io.enq.bits.chunk_data := io.mem_stream.output_data
load_data_queue.io.enq.bits.chunk_size_bytes := io.mem_stream.available_output_bytes
load_data_queue.io.enq.bits.is_final_chunk := io.mem_stream.output_last_chunk
val fire_read = DecoupledHelper(
io.mem_stream.output_valid,
load_data_queue.io.enq.ready,
)
load_data_queue.io.enq.valid := fire_read.fire(load_data_queue.io.enq.ready)
io.mem_stream.output_ready := fire_read.fire(io.mem_stream.output_valid)
io.mem_stream.user_consumed_bytes := io.mem_stream.available_output_bytes
// ----------------------------
// API: connect load_data_queue
// ----------------------------
when (load_data_queue.io.enq.fire) {
logger.logInfo("load_data_q:enq: sz:%x final:%x data:%x\n",
load_data_queue.io.enq.bits.chunk_size_bytes,
load_data_queue.io.enq.bits.is_final_chunk,
load_data_queue.io.enq.bits.chunk_data,
)
}
when (load_data_queue.io.deq.fire) {
logger.logInfo("load_data_q:deq: sz:%x final:%x data:%x\n",
load_data_queue.io.deq.bits.chunk_size_bytes,
load_data_queue.io.deq.bits.is_final_chunk,
load_data_queue.io.deq.bits.chunk_data,
)
}
// 3. Write data through the memwriter
val store_data_queue = Module(new Queue(new LiteralChunk, 5))
dontTouch(store_data_queue.io.count)
val sdq_chunk_size = store_data_queue.io.deq.bits.chunk_size_bytes
val sdq_chunk_data = store_data_queue.io.deq.bits.chunk_data
val sdq_chunk_data_vec = VecInit(Seq.fill(BUS_SZ_BYTES)(0.U(8.W)))
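// Byte-reverse the chunk: source byte i of sdq_chunk_data is routed to lane
// (sdq_chunk_size - 1 - i); the memwriter consumes only the first `validbytes` lanes.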
for (i <- 0 to (BUS_SZ_BYTES - 1)) {
sdq_chunk_data_vec(sdq_chunk_size - 1.U - i.U) := sdq_chunk_data((8*(i+1))-1, 8*i)
}
io.memwrites_in.bits.data := sdq_chunk_data_vec.asUInt
io.memwrites_in.bits.validbytes := sdq_chunk_size
io.memwrites_in.bits.end_of_message := store_data_queue.io.deq.bits.is_final_chunk
io.memwrites_in.valid := store_data_queue.io.deq.valid
store_data_queue.io.deq.ready := io.memwrites_in.ready
// -----------------------------
// API: connect store_data_queue
// -----------------------------
when (store_data_queue.io.enq.fire) {
logger.logInfo("store_data_q:enq: sz:%x final:%x data:%x\n",
store_data_queue.io.enq.bits.chunk_size_bytes,
store_data_queue.io.enq.bits.is_final_chunk,
store_data_queue.io.enq.bits.chunk_data,
)
}
when (store_data_queue.io.deq.fire) {
logger.logInfo("store_data_q:deq: sz:%x final:%x data:%x\n",
store_data_queue.io.deq.bits.chunk_size_bytes,
store_data_queue.io.deq.bits.is_final_chunk,
store_data_queue.io.deq.bits.chunk_data,
)
}
}
File AES256ECB.scala:
// See LICENSE for license details
package aes
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.util.{DecoupledHelper}
import testchipip.serdes.{StreamWidener, StreamNarrower}
import roccaccutils._
import roccaccutils.logger._
import AES256Consts._
class AES256ECB(val logger: Logger = DefaultLogger)(implicit val p: Parameters, val hp: L2MemHelperParams) extends MemStreamer {
class AES256ECBBundle extends MemStreamerBundle {
val key = Flipped(Valid(UInt(AES256Consts.KEY_SZ_BITS.W))) //from CommandRouter
val mode = Flipped(Valid(Bool())) //from CommandRouter
}
lazy val io = IO(new AES256ECBBundle)
// Connect AES core to MemLoader (i.e. load_data_queue)
val aes = Module(new AesCipherCoreDriver)
assert(BLOCK_SZ_BITS <= BUS_SZ_BITS, "Need the bus bits to be greater than (or equal to) the block bits")
val snarrower = Module(new StreamNarrower(BUS_SZ_BITS, BLOCK_SZ_BITS))
val swidener = Module(new StreamWidener(BLOCK_SZ_BITS, BUS_SZ_BITS))
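// Dataflow: load_data_queue -> snarrower (BUS_SZ_BITS chunks narrowed to BLOCK_SZ_BITS
// blocks) -> aes core -> swidener (blocks widened back to BUS_SZ_BITS) -> store_data_queue,
// with last_queue carrying each block's `last` flag across the AES core.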
val key = RegInit(0.U(AES256Consts.KEY_SZ_BITS.W))
when (io.key.valid) {
key := io.key.bits
}
val mode = RegInit(false.B)
when (io.mode.valid) {
mode := io.mode.bits
}
val last_queue = Module(new Queue(Bool(), 5)) // tracks stream.bits.last across the aes compute
val na_fire = DecoupledHelper(
snarrower.io.out.valid,
last_queue.io.enq.ready,
aes.io.in.ready
)
val aw_fire = DecoupledHelper(
aes.io.out.valid,
last_queue.io.deq.valid,
swidener.io.in.ready
)
last_queue.io.enq.bits := snarrower.io.out.bits.last
last_queue.io.enq.valid := na_fire.fire(last_queue.io.enq.ready)
last_queue.io.deq.ready := aw_fire.fire(last_queue.io.deq.valid)
aes.io.in.bits.key := key
aes.io.in.bits.encrypt := mode
aes.io.in.bits.data := snarrower.io.out.bits.data
aes.io.in.valid := snarrower.io.out.valid
snarrower.io.out.ready := na_fire.fire(snarrower.io.out.valid)
swidener.io.in.bits.data := aes.io.out.bits.data
swidener.io.in.bits.keep := (1.U << BLOCK_SZ_BYTES) - 1.U
swidener.io.in.bits.last := last_queue.io.deq.bits
swidener.io.in.valid := aw_fire.fire(swidener.io.in.ready)
aes.io.out.ready := aw_fire.fire(aes.io.out.valid)
snarrower.io.in.bits.data := load_data_queue.io.deq.bits.chunk_data
snarrower.io.in.bits.keep := (1.U << load_data_queue.io.deq.bits.chunk_size_bytes) - 1.U
snarrower.io.in.bits.last := load_data_queue.io.deq.bits.is_final_chunk
val narrow_fire = DecoupledHelper(
load_data_queue.io.deq.valid,
snarrower.io.in.ready,
)
snarrower.io.in.valid := narrow_fire.fire(snarrower.io.in.ready)
load_data_queue.io.deq.ready := narrow_fire.fire(load_data_queue.io.deq.valid)
// Connect AES core output to MemWriter (i.e. store_data_queue)
store_data_queue.io.enq.bits.chunk_data := swidener.io.out.bits.data
store_data_queue.io.enq.bits.chunk_size_bytes := PopCount(swidener.io.out.bits.keep)
store_data_queue.io.enq.bits.is_final_chunk := swidener.io.out.bits.last
val write_fire = DecoupledHelper(
swidener.io.out.valid,
store_data_queue.io.enq.ready
)
store_data_queue.io.enq.valid := write_fire.fire(store_data_queue.io.enq.ready)
swidener.io.out.ready := write_fire.fire(swidener.io.out.valid)
}
| module AES256ECB( // @[AES256ECB.scala:16:7]
input clock, // @[AES256ECB.scala:16:7]
input reset, // @[AES256ECB.scala:16:7]
output [5:0] load_data_queue_io_enq_bits_chunk_data_io_mem_stream_user_consumed_bytes, // @[AES256ECB.scala:21:19]
input [5:0] load_data_queue_io_enq_bits_chunk_data_io_mem_stream_available_output_bytes, // @[AES256ECB.scala:21:19]
input load_data_queue_io_enq_bits_chunk_data_io_mem_stream_output_valid, // @[AES256ECB.scala:21:19]
output load_data_queue_io_enq_bits_chunk_data_io_mem_stream_output_ready, // @[AES256ECB.scala:21:19]
input [255:0] load_data_queue_io_enq_bits_chunk_data_io_mem_stream_output_data, // @[AES256ECB.scala:21:19]
input load_data_queue_io_enq_bits_chunk_data_io_mem_stream_output_last_chunk, // @[AES256ECB.scala:21:19]
input load_data_queue_io_enq_bits_chunk_data_io_memwrites_in_ready, // @[AES256ECB.scala:21:19]
output load_data_queue_io_enq_bits_chunk_data_io_memwrites_in_valid, // @[AES256ECB.scala:21:19]
output [255:0] load_data_queue_io_enq_bits_chunk_data_io_memwrites_in_bits_data, // @[AES256ECB.scala:21:19]
output [5:0] load_data_queue_io_enq_bits_chunk_data_io_memwrites_in_bits_validbytes, // @[AES256ECB.scala:21:19]
output load_data_queue_io_enq_bits_chunk_data_io_memwrites_in_bits_end_of_message, // @[AES256ECB.scala:21:19]
input load_data_queue_io_enq_bits_chunk_data_io_key_valid, // @[AES256ECB.scala:21:19]
input [255:0] load_data_queue_io_enq_bits_chunk_data_io_key_bits, // @[AES256ECB.scala:21:19]
input load_data_queue_io_enq_bits_chunk_data_io_mode_valid, // @[AES256ECB.scala:21:19]
input load_data_queue_io_enq_bits_chunk_data_io_mode_bits // @[AES256ECB.scala:21:19]
);
wire _last_queue_io_enq_ready; // @[AES256ECB.scala:41:26]
wire _last_queue_io_deq_valid; // @[AES256ECB.scala:41:26]
wire _last_queue_io_deq_bits; // @[AES256ECB.scala:41:26]
wire _swidener_io_in_ready; // @[AES256ECB.scala:30:24]
wire _swidener_io_out_valid; // @[AES256ECB.scala:30:24]
wire [255:0] _swidener_io_out_bits_data; // @[AES256ECB.scala:30:24]
wire [31:0] _swidener_io_out_bits_keep; // @[AES256ECB.scala:30:24]
wire _swidener_io_out_bits_last; // @[AES256ECB.scala:30:24]
wire _snarrower_io_in_ready; // @[AES256ECB.scala:29:25]
wire _snarrower_io_out_valid; // @[AES256ECB.scala:29:25]
wire [127:0] _snarrower_io_out_bits_data; // @[AES256ECB.scala:29:25]
wire _snarrower_io_out_bits_last; // @[AES256ECB.scala:29:25]
wire _aes_io_in_ready; // @[AES256ECB.scala:25:19]
wire _aes_io_out_valid; // @[AES256ECB.scala:25:19]
wire [127:0] _aes_io_out_bits_data; // @[AES256ECB.scala:25:19]
wire _store_data_queue_io_enq_ready; // @[MemStreamer.scala:74:32]
wire _store_data_queue_io_deq_valid; // @[MemStreamer.scala:74:32]
wire [255:0] _store_data_queue_io_deq_bits_chunk_data; // @[MemStreamer.scala:74:32]
wire [5:0] _store_data_queue_io_deq_bits_chunk_size_bytes; // @[MemStreamer.scala:74:32]
wire _store_data_queue_io_deq_bits_is_final_chunk; // @[MemStreamer.scala:74:32]
wire _load_data_queue_io_enq_ready; // @[MemStreamer.scala:39:31]
wire _load_data_queue_io_deq_valid; // @[MemStreamer.scala:39:31]
wire [255:0] _load_data_queue_io_deq_bits_chunk_data; // @[MemStreamer.scala:39:31]
wire [5:0] _load_data_queue_io_deq_bits_chunk_size_bytes; // @[MemStreamer.scala:39:31]
wire _load_data_queue_io_deq_bits_is_final_chunk; // @[MemStreamer.scala:39:31]
wire [5:0] load_data_queue_io_enq_bits_chunk_data_io_mem_stream_available_output_bytes_0 = load_data_queue_io_enq_bits_chunk_data_io_mem_stream_available_output_bytes; // @[AES256ECB.scala:16:7]
wire load_data_queue_io_enq_bits_chunk_data_io_mem_stream_output_valid_0 = load_data_queue_io_enq_bits_chunk_data_io_mem_stream_output_valid; // @[AES256ECB.scala:16:7]
wire [255:0] load_data_queue_io_enq_bits_chunk_data_io_mem_stream_output_data_0 = load_data_queue_io_enq_bits_chunk_data_io_mem_stream_output_data; // @[AES256ECB.scala:16:7]
wire load_data_queue_io_enq_bits_chunk_data_io_mem_stream_output_last_chunk_0 = load_data_queue_io_enq_bits_chunk_data_io_mem_stream_output_last_chunk; // @[AES256ECB.scala:16:7]
wire load_data_queue_io_enq_bits_chunk_data_io_memwrites_in_ready_0 = load_data_queue_io_enq_bits_chunk_data_io_memwrites_in_ready; // @[AES256ECB.scala:16:7]
wire load_data_queue_io_enq_bits_chunk_data_io_key_valid_0 = load_data_queue_io_enq_bits_chunk_data_io_key_valid; // @[AES256ECB.scala:16:7]
wire [255:0] load_data_queue_io_enq_bits_chunk_data_io_key_bits_0 = load_data_queue_io_enq_bits_chunk_data_io_key_bits; // @[AES256ECB.scala:16:7]
wire load_data_queue_io_enq_bits_chunk_data_io_mode_valid_0 = load_data_queue_io_enq_bits_chunk_data_io_mode_valid; // @[AES256ECB.scala:16:7]
wire load_data_queue_io_enq_bits_chunk_data_io_mode_bits_0 = load_data_queue_io_enq_bits_chunk_data_io_mode_bits; // @[AES256ECB.scala:16:7]
wire [16:0] _swidener_io_in_bits_keep_T_2 = 17'hFFFF; // @[AES256ECB.scala:67:55]
wire [17:0] _swidener_io_in_bits_keep_T_1 = 18'hFFFF; // @[AES256ECB.scala:67:55]
wire [16:0] _swidener_io_in_bits_keep_T = 17'h10000; // @[AES256ECB.scala:67:36]
wire [5:0] load_data_queue_io_enq_bits_chunk_data_io_mem_stream_user_consumed_bytes_0 = load_data_queue_io_enq_bits_chunk_data_io_mem_stream_available_output_bytes_0; // @[AES256ECB.scala:16:7]
wire [255:0] _io_memwrites_in_bits_data_T; // @[MemStreamer.scala:83:51]
wire load_data_queue_io_enq_bits_chunk_data_io_mem_stream_output_ready_0; // @[AES256ECB.scala:16:7]
wire [255:0] load_data_queue_io_enq_bits_chunk_data_io_memwrites_in_bits_data_0; // @[AES256ECB.scala:16:7]
wire [5:0] load_data_queue_io_enq_bits_chunk_data_io_memwrites_in_bits_validbytes_0; // @[AES256ECB.scala:16:7]
wire load_data_queue_io_enq_bits_chunk_data_io_memwrites_in_bits_end_of_message_0; // @[AES256ECB.scala:16:7]
wire load_data_queue_io_enq_bits_chunk_data_io_memwrites_in_valid_0; // @[AES256ECB.scala:16:7]
reg [63:0] allargs_0; // @[Logger.scala:37:33]
wire [64:0] _loginfo_cycles_T = {1'h0, allargs_0} + 65'h1; // @[Logger.scala:37:33, :38:38]
wire [63:0] _loginfo_cycles_T_1 = _loginfo_cycles_T[63:0]; // @[Logger.scala:38:38]
reg [63:0] allargs_0_1; // @[Logger.scala:37:33]
wire [64:0] _loginfo_cycles_T_2 = {1'h0, allargs_0_1} + 65'h1; // @[Logger.scala:37:33, :38:38]
wire [63:0] _loginfo_cycles_T_3 = _loginfo_cycles_T_2[63:0]; // @[Logger.scala:38:38]
wire [7:0] sdq_chunk_data_vec_0; // @[MemStreamer.scala:79:35]
wire [7:0] sdq_chunk_data_vec_1; // @[MemStreamer.scala:79:35]
wire [7:0] sdq_chunk_data_vec_2; // @[MemStreamer.scala:79:35]
wire [7:0] sdq_chunk_data_vec_3; // @[MemStreamer.scala:79:35]
wire [7:0] sdq_chunk_data_vec_4; // @[MemStreamer.scala:79:35]
wire [7:0] sdq_chunk_data_vec_5; // @[MemStreamer.scala:79:35]
wire [7:0] sdq_chunk_data_vec_6; // @[MemStreamer.scala:79:35]
wire [7:0] sdq_chunk_data_vec_7; // @[MemStreamer.scala:79:35]
wire [7:0] sdq_chunk_data_vec_8; // @[MemStreamer.scala:79:35]
wire [7:0] sdq_chunk_data_vec_9; // @[MemStreamer.scala:79:35]
wire [7:0] sdq_chunk_data_vec_10; // @[MemStreamer.scala:79:35]
wire [7:0] sdq_chunk_data_vec_11; // @[MemStreamer.scala:79:35]
wire [7:0] sdq_chunk_data_vec_12; // @[MemStreamer.scala:79:35]
wire [7:0] sdq_chunk_data_vec_13; // @[MemStreamer.scala:79:35]
wire [7:0] sdq_chunk_data_vec_14; // @[MemStreamer.scala:79:35]
wire [7:0] sdq_chunk_data_vec_15; // @[MemStreamer.scala:79:35]
wire [7:0] sdq_chunk_data_vec_16; // @[MemStreamer.scala:79:35]
wire [7:0] sdq_chunk_data_vec_17; // @[MemStreamer.scala:79:35]
wire [7:0] sdq_chunk_data_vec_18; // @[MemStreamer.scala:79:35]
wire [7:0] sdq_chunk_data_vec_19; // @[MemStreamer.scala:79:35]
wire [7:0] sdq_chunk_data_vec_20; // @[MemStreamer.scala:79:35]
wire [7:0] sdq_chunk_data_vec_21; // @[MemStreamer.scala:79:35]
wire [7:0] sdq_chunk_data_vec_22; // @[MemStreamer.scala:79:35]
wire [7:0] sdq_chunk_data_vec_23; // @[MemStreamer.scala:79:35]
wire [7:0] sdq_chunk_data_vec_24; // @[MemStreamer.scala:79:35]
wire [7:0] sdq_chunk_data_vec_25; // @[MemStreamer.scala:79:35]
wire [7:0] sdq_chunk_data_vec_26; // @[MemStreamer.scala:79:35]
wire [7:0] sdq_chunk_data_vec_27; // @[MemStreamer.scala:79:35]
wire [7:0] sdq_chunk_data_vec_28; // @[MemStreamer.scala:79:35]
wire [7:0] sdq_chunk_data_vec_29; // @[MemStreamer.scala:79:35]
wire [7:0] sdq_chunk_data_vec_30; // @[MemStreamer.scala:79:35]
wire [7:0] sdq_chunk_data_vec_31; // @[MemStreamer.scala:79:35]
wire [4:0] _T_161 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] - 5'h1; // @[MemStreamer.scala:74:32, :81:39]
wire [7:0] _sdq_chunk_data_vec_T = _store_data_queue_io_deq_bits_chunk_data[7:0]; // @[MemStreamer.scala:74:32, :81:69]
wire [4:0] _T_13 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] - 5'h2; // @[MemStreamer.scala:74:32, :81:45]
wire [7:0] _sdq_chunk_data_vec_T_1 = _store_data_queue_io_deq_bits_chunk_data[15:8]; // @[MemStreamer.scala:74:32, :81:69]
wire [4:0] _T_18 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] - 5'h3; // @[MemStreamer.scala:74:32, :81:45]
wire [7:0] _sdq_chunk_data_vec_T_2 = _store_data_queue_io_deq_bits_chunk_data[23:16]; // @[MemStreamer.scala:74:32, :81:69]
wire [4:0] _T_23 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] - 5'h4; // @[MemStreamer.scala:74:32, :81:45]
wire [7:0] _sdq_chunk_data_vec_T_3 = _store_data_queue_io_deq_bits_chunk_data[31:24]; // @[MemStreamer.scala:74:32, :81:69]
wire [4:0] _T_28 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] - 5'h5; // @[MemStreamer.scala:74:32, :81:45]
wire [7:0] _sdq_chunk_data_vec_T_4 = _store_data_queue_io_deq_bits_chunk_data[39:32]; // @[MemStreamer.scala:74:32, :81:69]
wire [4:0] _T_33 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] - 5'h6; // @[MemStreamer.scala:74:32, :81:45]
wire [7:0] _sdq_chunk_data_vec_T_5 = _store_data_queue_io_deq_bits_chunk_data[47:40]; // @[MemStreamer.scala:74:32, :81:69]
wire [4:0] _T_38 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] - 5'h7; // @[MemStreamer.scala:74:32, :81:45]
wire [7:0] _sdq_chunk_data_vec_T_6 = _store_data_queue_io_deq_bits_chunk_data[55:48]; // @[MemStreamer.scala:74:32, :81:69]
wire [4:0] _T_43 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] - 5'h8; // @[MemStreamer.scala:74:32, :81:45]
wire [7:0] _sdq_chunk_data_vec_T_7 = _store_data_queue_io_deq_bits_chunk_data[63:56]; // @[MemStreamer.scala:74:32, :81:69]
wire [4:0] _T_48 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] - 5'h9; // @[MemStreamer.scala:74:32, :81:45]
wire [7:0] _sdq_chunk_data_vec_T_8 = _store_data_queue_io_deq_bits_chunk_data[71:64]; // @[MemStreamer.scala:74:32, :81:69]
wire [4:0] _T_53 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] - 5'hA; // @[MemStreamer.scala:74:32, :81:45]
wire [7:0] _sdq_chunk_data_vec_T_9 = _store_data_queue_io_deq_bits_chunk_data[79:72]; // @[MemStreamer.scala:74:32, :81:69]
wire [4:0] _T_58 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] - 5'hB; // @[MemStreamer.scala:74:32, :81:45]
wire [7:0] _sdq_chunk_data_vec_T_10 = _store_data_queue_io_deq_bits_chunk_data[87:80]; // @[MemStreamer.scala:74:32, :81:69]
wire [4:0] _T_63 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] - 5'hC; // @[MemStreamer.scala:74:32, :81:45]
wire [7:0] _sdq_chunk_data_vec_T_11 = _store_data_queue_io_deq_bits_chunk_data[95:88]; // @[MemStreamer.scala:74:32, :81:69]
wire [4:0] _T_68 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] - 5'hD; // @[MemStreamer.scala:74:32, :81:45]
wire [7:0] _sdq_chunk_data_vec_T_12 = _store_data_queue_io_deq_bits_chunk_data[103:96]; // @[MemStreamer.scala:74:32, :81:69]
wire [4:0] _T_73 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] - 5'hE; // @[MemStreamer.scala:74:32, :81:45]
wire [7:0] _sdq_chunk_data_vec_T_13 = _store_data_queue_io_deq_bits_chunk_data[111:104]; // @[MemStreamer.scala:74:32, :81:69]
wire [4:0] _T_78 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] - 5'hF; // @[MemStreamer.scala:74:32, :81:45]
wire [7:0] _sdq_chunk_data_vec_T_14 = _store_data_queue_io_deq_bits_chunk_data[119:112]; // @[MemStreamer.scala:74:32, :81:69]
wire [4:0] _T_83 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] - 5'h10; // @[MemStreamer.scala:74:32, :81:45]
wire [7:0] _sdq_chunk_data_vec_T_15 = _store_data_queue_io_deq_bits_chunk_data[127:120]; // @[MemStreamer.scala:74:32, :81:69]
wire [4:0] _T_88 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] + 5'hF; // @[MemStreamer.scala:74:32, :81:45]
wire [7:0] _sdq_chunk_data_vec_T_16 = _store_data_queue_io_deq_bits_chunk_data[135:128]; // @[MemStreamer.scala:74:32, :81:69]
wire [4:0] _T_93 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] + 5'hE; // @[MemStreamer.scala:74:32, :81:45]
wire [7:0] _sdq_chunk_data_vec_T_17 = _store_data_queue_io_deq_bits_chunk_data[143:136]; // @[MemStreamer.scala:74:32, :81:69]
wire [4:0] _T_98 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] + 5'hD; // @[MemStreamer.scala:74:32, :81:45]
wire [7:0] _sdq_chunk_data_vec_T_18 = _store_data_queue_io_deq_bits_chunk_data[151:144]; // @[MemStreamer.scala:74:32, :81:69]
wire [4:0] _T_103 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] + 5'hC; // @[MemStreamer.scala:74:32, :81:45]
wire [7:0] _sdq_chunk_data_vec_T_19 = _store_data_queue_io_deq_bits_chunk_data[159:152]; // @[MemStreamer.scala:74:32, :81:69]
wire [4:0] _T_108 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] + 5'hB; // @[MemStreamer.scala:74:32, :81:45]
wire [7:0] _sdq_chunk_data_vec_T_20 = _store_data_queue_io_deq_bits_chunk_data[167:160]; // @[MemStreamer.scala:74:32, :81:69]
wire [4:0] _T_113 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] + 5'hA; // @[MemStreamer.scala:74:32, :81:45]
wire [7:0] _sdq_chunk_data_vec_T_21 = _store_data_queue_io_deq_bits_chunk_data[175:168]; // @[MemStreamer.scala:74:32, :81:69]
wire [4:0] _T_118 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] + 5'h9; // @[MemStreamer.scala:74:32, :81:45]
wire [7:0] _sdq_chunk_data_vec_T_22 = _store_data_queue_io_deq_bits_chunk_data[183:176]; // @[MemStreamer.scala:74:32, :81:69]
wire [4:0] _T_123 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] + 5'h8; // @[MemStreamer.scala:74:32, :81:45]
wire [7:0] _sdq_chunk_data_vec_T_23 = _store_data_queue_io_deq_bits_chunk_data[191:184]; // @[MemStreamer.scala:74:32, :81:69]
wire [4:0] _T_128 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] + 5'h7; // @[MemStreamer.scala:74:32, :81:45]
wire [7:0] _sdq_chunk_data_vec_T_24 = _store_data_queue_io_deq_bits_chunk_data[199:192]; // @[MemStreamer.scala:74:32, :81:69]
wire [4:0] _T_133 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] + 5'h6; // @[MemStreamer.scala:74:32, :81:45]
wire [7:0] _sdq_chunk_data_vec_T_25 = _store_data_queue_io_deq_bits_chunk_data[207:200]; // @[MemStreamer.scala:74:32, :81:69]
wire [4:0] _T_138 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] + 5'h5; // @[MemStreamer.scala:74:32, :81:45]
wire [7:0] _sdq_chunk_data_vec_T_26 = _store_data_queue_io_deq_bits_chunk_data[215:208]; // @[MemStreamer.scala:74:32, :81:69]
wire [4:0] _T_143 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] + 5'h4; // @[MemStreamer.scala:74:32, :81:45]
wire [7:0] _sdq_chunk_data_vec_T_27 = _store_data_queue_io_deq_bits_chunk_data[223:216]; // @[MemStreamer.scala:74:32, :81:69]
wire [4:0] _T_148 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] + 5'h3; // @[MemStreamer.scala:74:32, :81:45]
wire [7:0] _sdq_chunk_data_vec_T_28 = _store_data_queue_io_deq_bits_chunk_data[231:224]; // @[MemStreamer.scala:74:32, :81:69]
wire [4:0] _T_153 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] + 5'h2; // @[MemStreamer.scala:74:32, :81:45]
wire [7:0] _sdq_chunk_data_vec_T_29 = _store_data_queue_io_deq_bits_chunk_data[239:232]; // @[MemStreamer.scala:74:32, :81:69]
wire [4:0] _T_158 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] + 5'h1; // @[MemStreamer.scala:74:32, :81:45]
wire [7:0] _sdq_chunk_data_vec_T_30 = _store_data_queue_io_deq_bits_chunk_data[247:240]; // @[MemStreamer.scala:74:32, :81:69]
wire [7:0] _sdq_chunk_data_vec_T_31 = _store_data_queue_io_deq_bits_chunk_data[255:248]; // @[MemStreamer.scala:74:32, :81:69]
assign sdq_chunk_data_vec_0 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] == 5'h0 ? _sdq_chunk_data_vec_T_31 : _T_158 == 5'h0 ? _sdq_chunk_data_vec_T_30 : _T_153 == 5'h0 ? _sdq_chunk_data_vec_T_29 : _T_148 == 5'h0 ? _sdq_chunk_data_vec_T_28 : _T_143 == 5'h0 ? _sdq_chunk_data_vec_T_27 : _T_138 == 5'h0 ? _sdq_chunk_data_vec_T_26 : _T_133 == 5'h0 ? _sdq_chunk_data_vec_T_25 : _T_128 == 5'h0 ? _sdq_chunk_data_vec_T_24 : _T_123 == 5'h0 ? _sdq_chunk_data_vec_T_23 : _T_118 == 5'h0 ? _sdq_chunk_data_vec_T_22 : _T_113 == 5'h0 ? _sdq_chunk_data_vec_T_21 : _T_108 == 5'h0 ? _sdq_chunk_data_vec_T_20 : _T_103 == 5'h0 ? _sdq_chunk_data_vec_T_19 : _T_98 == 5'h0 ? _sdq_chunk_data_vec_T_18 : _T_93 == 5'h0 ? _sdq_chunk_data_vec_T_17 : _T_88 == 5'h0 ? _sdq_chunk_data_vec_T_16 : _T_83 == 5'h0 ? _sdq_chunk_data_vec_T_15 : _T_78 == 5'h0 ? _sdq_chunk_data_vec_T_14 : _T_73 == 5'h0 ? _sdq_chunk_data_vec_T_13 : _T_68 == 5'h0 ? _sdq_chunk_data_vec_T_12 : _T_63 == 5'h0 ? _sdq_chunk_data_vec_T_11 : _T_58 == 5'h0 ? _sdq_chunk_data_vec_T_10 : _T_53 == 5'h0 ? _sdq_chunk_data_vec_T_9 : _T_48 == 5'h0 ? _sdq_chunk_data_vec_T_8 : _T_43 == 5'h0 ? _sdq_chunk_data_vec_T_7 : _T_38 == 5'h0 ? _sdq_chunk_data_vec_T_6 : _T_33 == 5'h0 ? _sdq_chunk_data_vec_T_5 : _T_28 == 5'h0 ? _sdq_chunk_data_vec_T_4 : _T_23 == 5'h0 ? _sdq_chunk_data_vec_T_3 : _T_18 == 5'h0 ? _sdq_chunk_data_vec_T_2 : _T_13 == 5'h0 ? _sdq_chunk_data_vec_T_1 : _T_161 == 5'h0 ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
assign sdq_chunk_data_vec_1 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] == 5'h1 ? _sdq_chunk_data_vec_T_31 : _T_158 == 5'h1 ? _sdq_chunk_data_vec_T_30 : _T_153 == 5'h1 ? _sdq_chunk_data_vec_T_29 : _T_148 == 5'h1 ? _sdq_chunk_data_vec_T_28 : _T_143 == 5'h1 ? _sdq_chunk_data_vec_T_27 : _T_138 == 5'h1 ? _sdq_chunk_data_vec_T_26 : _T_133 == 5'h1 ? _sdq_chunk_data_vec_T_25 : _T_128 == 5'h1 ? _sdq_chunk_data_vec_T_24 : _T_123 == 5'h1 ? _sdq_chunk_data_vec_T_23 : _T_118 == 5'h1 ? _sdq_chunk_data_vec_T_22 : _T_113 == 5'h1 ? _sdq_chunk_data_vec_T_21 : _T_108 == 5'h1 ? _sdq_chunk_data_vec_T_20 : _T_103 == 5'h1 ? _sdq_chunk_data_vec_T_19 : _T_98 == 5'h1 ? _sdq_chunk_data_vec_T_18 : _T_93 == 5'h1 ? _sdq_chunk_data_vec_T_17 : _T_88 == 5'h1 ? _sdq_chunk_data_vec_T_16 : _T_83 == 5'h1 ? _sdq_chunk_data_vec_T_15 : _T_78 == 5'h1 ? _sdq_chunk_data_vec_T_14 : _T_73 == 5'h1 ? _sdq_chunk_data_vec_T_13 : _T_68 == 5'h1 ? _sdq_chunk_data_vec_T_12 : _T_63 == 5'h1 ? _sdq_chunk_data_vec_T_11 : _T_58 == 5'h1 ? _sdq_chunk_data_vec_T_10 : _T_53 == 5'h1 ? _sdq_chunk_data_vec_T_9 : _T_48 == 5'h1 ? _sdq_chunk_data_vec_T_8 : _T_43 == 5'h1 ? _sdq_chunk_data_vec_T_7 : _T_38 == 5'h1 ? _sdq_chunk_data_vec_T_6 : _T_33 == 5'h1 ? _sdq_chunk_data_vec_T_5 : _T_28 == 5'h1 ? _sdq_chunk_data_vec_T_4 : _T_23 == 5'h1 ? _sdq_chunk_data_vec_T_3 : _T_18 == 5'h1 ? _sdq_chunk_data_vec_T_2 : _T_13 == 5'h1 ? _sdq_chunk_data_vec_T_1 : _T_161 == 5'h1 ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
assign sdq_chunk_data_vec_2 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] == 5'h2 ? _sdq_chunk_data_vec_T_31 : _T_158 == 5'h2 ? _sdq_chunk_data_vec_T_30 : _T_153 == 5'h2 ? _sdq_chunk_data_vec_T_29 : _T_148 == 5'h2 ? _sdq_chunk_data_vec_T_28 : _T_143 == 5'h2 ? _sdq_chunk_data_vec_T_27 : _T_138 == 5'h2 ? _sdq_chunk_data_vec_T_26 : _T_133 == 5'h2 ? _sdq_chunk_data_vec_T_25 : _T_128 == 5'h2 ? _sdq_chunk_data_vec_T_24 : _T_123 == 5'h2 ? _sdq_chunk_data_vec_T_23 : _T_118 == 5'h2 ? _sdq_chunk_data_vec_T_22 : _T_113 == 5'h2 ? _sdq_chunk_data_vec_T_21 : _T_108 == 5'h2 ? _sdq_chunk_data_vec_T_20 : _T_103 == 5'h2 ? _sdq_chunk_data_vec_T_19 : _T_98 == 5'h2 ? _sdq_chunk_data_vec_T_18 : _T_93 == 5'h2 ? _sdq_chunk_data_vec_T_17 : _T_88 == 5'h2 ? _sdq_chunk_data_vec_T_16 : _T_83 == 5'h2 ? _sdq_chunk_data_vec_T_15 : _T_78 == 5'h2 ? _sdq_chunk_data_vec_T_14 : _T_73 == 5'h2 ? _sdq_chunk_data_vec_T_13 : _T_68 == 5'h2 ? _sdq_chunk_data_vec_T_12 : _T_63 == 5'h2 ? _sdq_chunk_data_vec_T_11 : _T_58 == 5'h2 ? _sdq_chunk_data_vec_T_10 : _T_53 == 5'h2 ? _sdq_chunk_data_vec_T_9 : _T_48 == 5'h2 ? _sdq_chunk_data_vec_T_8 : _T_43 == 5'h2 ? _sdq_chunk_data_vec_T_7 : _T_38 == 5'h2 ? _sdq_chunk_data_vec_T_6 : _T_33 == 5'h2 ? _sdq_chunk_data_vec_T_5 : _T_28 == 5'h2 ? _sdq_chunk_data_vec_T_4 : _T_23 == 5'h2 ? _sdq_chunk_data_vec_T_3 : _T_18 == 5'h2 ? _sdq_chunk_data_vec_T_2 : _T_13 == 5'h2 ? _sdq_chunk_data_vec_T_1 : _T_161 == 5'h2 ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
assign sdq_chunk_data_vec_3 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] == 5'h3 ? _sdq_chunk_data_vec_T_31 : _T_158 == 5'h3 ? _sdq_chunk_data_vec_T_30 : _T_153 == 5'h3 ? _sdq_chunk_data_vec_T_29 : _T_148 == 5'h3 ? _sdq_chunk_data_vec_T_28 : _T_143 == 5'h3 ? _sdq_chunk_data_vec_T_27 : _T_138 == 5'h3 ? _sdq_chunk_data_vec_T_26 : _T_133 == 5'h3 ? _sdq_chunk_data_vec_T_25 : _T_128 == 5'h3 ? _sdq_chunk_data_vec_T_24 : _T_123 == 5'h3 ? _sdq_chunk_data_vec_T_23 : _T_118 == 5'h3 ? _sdq_chunk_data_vec_T_22 : _T_113 == 5'h3 ? _sdq_chunk_data_vec_T_21 : _T_108 == 5'h3 ? _sdq_chunk_data_vec_T_20 : _T_103 == 5'h3 ? _sdq_chunk_data_vec_T_19 : _T_98 == 5'h3 ? _sdq_chunk_data_vec_T_18 : _T_93 == 5'h3 ? _sdq_chunk_data_vec_T_17 : _T_88 == 5'h3 ? _sdq_chunk_data_vec_T_16 : _T_83 == 5'h3 ? _sdq_chunk_data_vec_T_15 : _T_78 == 5'h3 ? _sdq_chunk_data_vec_T_14 : _T_73 == 5'h3 ? _sdq_chunk_data_vec_T_13 : _T_68 == 5'h3 ? _sdq_chunk_data_vec_T_12 : _T_63 == 5'h3 ? _sdq_chunk_data_vec_T_11 : _T_58 == 5'h3 ? _sdq_chunk_data_vec_T_10 : _T_53 == 5'h3 ? _sdq_chunk_data_vec_T_9 : _T_48 == 5'h3 ? _sdq_chunk_data_vec_T_8 : _T_43 == 5'h3 ? _sdq_chunk_data_vec_T_7 : _T_38 == 5'h3 ? _sdq_chunk_data_vec_T_6 : _T_33 == 5'h3 ? _sdq_chunk_data_vec_T_5 : _T_28 == 5'h3 ? _sdq_chunk_data_vec_T_4 : _T_23 == 5'h3 ? _sdq_chunk_data_vec_T_3 : _T_18 == 5'h3 ? _sdq_chunk_data_vec_T_2 : _T_13 == 5'h3 ? _sdq_chunk_data_vec_T_1 : _T_161 == 5'h3 ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
assign sdq_chunk_data_vec_4 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] == 5'h4 ? _sdq_chunk_data_vec_T_31 : _T_158 == 5'h4 ? _sdq_chunk_data_vec_T_30 : _T_153 == 5'h4 ? _sdq_chunk_data_vec_T_29 : _T_148 == 5'h4 ? _sdq_chunk_data_vec_T_28 : _T_143 == 5'h4 ? _sdq_chunk_data_vec_T_27 : _T_138 == 5'h4 ? _sdq_chunk_data_vec_T_26 : _T_133 == 5'h4 ? _sdq_chunk_data_vec_T_25 : _T_128 == 5'h4 ? _sdq_chunk_data_vec_T_24 : _T_123 == 5'h4 ? _sdq_chunk_data_vec_T_23 : _T_118 == 5'h4 ? _sdq_chunk_data_vec_T_22 : _T_113 == 5'h4 ? _sdq_chunk_data_vec_T_21 : _T_108 == 5'h4 ? _sdq_chunk_data_vec_T_20 : _T_103 == 5'h4 ? _sdq_chunk_data_vec_T_19 : _T_98 == 5'h4 ? _sdq_chunk_data_vec_T_18 : _T_93 == 5'h4 ? _sdq_chunk_data_vec_T_17 : _T_88 == 5'h4 ? _sdq_chunk_data_vec_T_16 : _T_83 == 5'h4 ? _sdq_chunk_data_vec_T_15 : _T_78 == 5'h4 ? _sdq_chunk_data_vec_T_14 : _T_73 == 5'h4 ? _sdq_chunk_data_vec_T_13 : _T_68 == 5'h4 ? _sdq_chunk_data_vec_T_12 : _T_63 == 5'h4 ? _sdq_chunk_data_vec_T_11 : _T_58 == 5'h4 ? _sdq_chunk_data_vec_T_10 : _T_53 == 5'h4 ? _sdq_chunk_data_vec_T_9 : _T_48 == 5'h4 ? _sdq_chunk_data_vec_T_8 : _T_43 == 5'h4 ? _sdq_chunk_data_vec_T_7 : _T_38 == 5'h4 ? _sdq_chunk_data_vec_T_6 : _T_33 == 5'h4 ? _sdq_chunk_data_vec_T_5 : _T_28 == 5'h4 ? _sdq_chunk_data_vec_T_4 : _T_23 == 5'h4 ? _sdq_chunk_data_vec_T_3 : _T_18 == 5'h4 ? _sdq_chunk_data_vec_T_2 : _T_13 == 5'h4 ? _sdq_chunk_data_vec_T_1 : _T_161 == 5'h4 ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
assign sdq_chunk_data_vec_5 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] == 5'h5 ? _sdq_chunk_data_vec_T_31 : _T_158 == 5'h5 ? _sdq_chunk_data_vec_T_30 : _T_153 == 5'h5 ? _sdq_chunk_data_vec_T_29 : _T_148 == 5'h5 ? _sdq_chunk_data_vec_T_28 : _T_143 == 5'h5 ? _sdq_chunk_data_vec_T_27 : _T_138 == 5'h5 ? _sdq_chunk_data_vec_T_26 : _T_133 == 5'h5 ? _sdq_chunk_data_vec_T_25 : _T_128 == 5'h5 ? _sdq_chunk_data_vec_T_24 : _T_123 == 5'h5 ? _sdq_chunk_data_vec_T_23 : _T_118 == 5'h5 ? _sdq_chunk_data_vec_T_22 : _T_113 == 5'h5 ? _sdq_chunk_data_vec_T_21 : _T_108 == 5'h5 ? _sdq_chunk_data_vec_T_20 : _T_103 == 5'h5 ? _sdq_chunk_data_vec_T_19 : _T_98 == 5'h5 ? _sdq_chunk_data_vec_T_18 : _T_93 == 5'h5 ? _sdq_chunk_data_vec_T_17 : _T_88 == 5'h5 ? _sdq_chunk_data_vec_T_16 : _T_83 == 5'h5 ? _sdq_chunk_data_vec_T_15 : _T_78 == 5'h5 ? _sdq_chunk_data_vec_T_14 : _T_73 == 5'h5 ? _sdq_chunk_data_vec_T_13 : _T_68 == 5'h5 ? _sdq_chunk_data_vec_T_12 : _T_63 == 5'h5 ? _sdq_chunk_data_vec_T_11 : _T_58 == 5'h5 ? _sdq_chunk_data_vec_T_10 : _T_53 == 5'h5 ? _sdq_chunk_data_vec_T_9 : _T_48 == 5'h5 ? _sdq_chunk_data_vec_T_8 : _T_43 == 5'h5 ? _sdq_chunk_data_vec_T_7 : _T_38 == 5'h5 ? _sdq_chunk_data_vec_T_6 : _T_33 == 5'h5 ? _sdq_chunk_data_vec_T_5 : _T_28 == 5'h5 ? _sdq_chunk_data_vec_T_4 : _T_23 == 5'h5 ? _sdq_chunk_data_vec_T_3 : _T_18 == 5'h5 ? _sdq_chunk_data_vec_T_2 : _T_13 == 5'h5 ? _sdq_chunk_data_vec_T_1 : _T_161 == 5'h5 ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
assign sdq_chunk_data_vec_6 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] == 5'h6 ? _sdq_chunk_data_vec_T_31 : _T_158 == 5'h6 ? _sdq_chunk_data_vec_T_30 : _T_153 == 5'h6 ? _sdq_chunk_data_vec_T_29 : _T_148 == 5'h6 ? _sdq_chunk_data_vec_T_28 : _T_143 == 5'h6 ? _sdq_chunk_data_vec_T_27 : _T_138 == 5'h6 ? _sdq_chunk_data_vec_T_26 : _T_133 == 5'h6 ? _sdq_chunk_data_vec_T_25 : _T_128 == 5'h6 ? _sdq_chunk_data_vec_T_24 : _T_123 == 5'h6 ? _sdq_chunk_data_vec_T_23 : _T_118 == 5'h6 ? _sdq_chunk_data_vec_T_22 : _T_113 == 5'h6 ? _sdq_chunk_data_vec_T_21 : _T_108 == 5'h6 ? _sdq_chunk_data_vec_T_20 : _T_103 == 5'h6 ? _sdq_chunk_data_vec_T_19 : _T_98 == 5'h6 ? _sdq_chunk_data_vec_T_18 : _T_93 == 5'h6 ? _sdq_chunk_data_vec_T_17 : _T_88 == 5'h6 ? _sdq_chunk_data_vec_T_16 : _T_83 == 5'h6 ? _sdq_chunk_data_vec_T_15 : _T_78 == 5'h6 ? _sdq_chunk_data_vec_T_14 : _T_73 == 5'h6 ? _sdq_chunk_data_vec_T_13 : _T_68 == 5'h6 ? _sdq_chunk_data_vec_T_12 : _T_63 == 5'h6 ? _sdq_chunk_data_vec_T_11 : _T_58 == 5'h6 ? _sdq_chunk_data_vec_T_10 : _T_53 == 5'h6 ? _sdq_chunk_data_vec_T_9 : _T_48 == 5'h6 ? _sdq_chunk_data_vec_T_8 : _T_43 == 5'h6 ? _sdq_chunk_data_vec_T_7 : _T_38 == 5'h6 ? _sdq_chunk_data_vec_T_6 : _T_33 == 5'h6 ? _sdq_chunk_data_vec_T_5 : _T_28 == 5'h6 ? _sdq_chunk_data_vec_T_4 : _T_23 == 5'h6 ? _sdq_chunk_data_vec_T_3 : _T_18 == 5'h6 ? _sdq_chunk_data_vec_T_2 : _T_13 == 5'h6 ? _sdq_chunk_data_vec_T_1 : _T_161 == 5'h6 ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
assign sdq_chunk_data_vec_7 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] == 5'h7 ? _sdq_chunk_data_vec_T_31 : _T_158 == 5'h7 ? _sdq_chunk_data_vec_T_30 : _T_153 == 5'h7 ? _sdq_chunk_data_vec_T_29 : _T_148 == 5'h7 ? _sdq_chunk_data_vec_T_28 : _T_143 == 5'h7 ? _sdq_chunk_data_vec_T_27 : _T_138 == 5'h7 ? _sdq_chunk_data_vec_T_26 : _T_133 == 5'h7 ? _sdq_chunk_data_vec_T_25 : _T_128 == 5'h7 ? _sdq_chunk_data_vec_T_24 : _T_123 == 5'h7 ? _sdq_chunk_data_vec_T_23 : _T_118 == 5'h7 ? _sdq_chunk_data_vec_T_22 : _T_113 == 5'h7 ? _sdq_chunk_data_vec_T_21 : _T_108 == 5'h7 ? _sdq_chunk_data_vec_T_20 : _T_103 == 5'h7 ? _sdq_chunk_data_vec_T_19 : _T_98 == 5'h7 ? _sdq_chunk_data_vec_T_18 : _T_93 == 5'h7 ? _sdq_chunk_data_vec_T_17 : _T_88 == 5'h7 ? _sdq_chunk_data_vec_T_16 : _T_83 == 5'h7 ? _sdq_chunk_data_vec_T_15 : _T_78 == 5'h7 ? _sdq_chunk_data_vec_T_14 : _T_73 == 5'h7 ? _sdq_chunk_data_vec_T_13 : _T_68 == 5'h7 ? _sdq_chunk_data_vec_T_12 : _T_63 == 5'h7 ? _sdq_chunk_data_vec_T_11 : _T_58 == 5'h7 ? _sdq_chunk_data_vec_T_10 : _T_53 == 5'h7 ? _sdq_chunk_data_vec_T_9 : _T_48 == 5'h7 ? _sdq_chunk_data_vec_T_8 : _T_43 == 5'h7 ? _sdq_chunk_data_vec_T_7 : _T_38 == 5'h7 ? _sdq_chunk_data_vec_T_6 : _T_33 == 5'h7 ? _sdq_chunk_data_vec_T_5 : _T_28 == 5'h7 ? _sdq_chunk_data_vec_T_4 : _T_23 == 5'h7 ? _sdq_chunk_data_vec_T_3 : _T_18 == 5'h7 ? _sdq_chunk_data_vec_T_2 : _T_13 == 5'h7 ? _sdq_chunk_data_vec_T_1 : _T_161 == 5'h7 ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
assign sdq_chunk_data_vec_8 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] == 5'h8 ? _sdq_chunk_data_vec_T_31 : _T_158 == 5'h8 ? _sdq_chunk_data_vec_T_30 : _T_153 == 5'h8 ? _sdq_chunk_data_vec_T_29 : _T_148 == 5'h8 ? _sdq_chunk_data_vec_T_28 : _T_143 == 5'h8 ? _sdq_chunk_data_vec_T_27 : _T_138 == 5'h8 ? _sdq_chunk_data_vec_T_26 : _T_133 == 5'h8 ? _sdq_chunk_data_vec_T_25 : _T_128 == 5'h8 ? _sdq_chunk_data_vec_T_24 : _T_123 == 5'h8 ? _sdq_chunk_data_vec_T_23 : _T_118 == 5'h8 ? _sdq_chunk_data_vec_T_22 : _T_113 == 5'h8 ? _sdq_chunk_data_vec_T_21 : _T_108 == 5'h8 ? _sdq_chunk_data_vec_T_20 : _T_103 == 5'h8 ? _sdq_chunk_data_vec_T_19 : _T_98 == 5'h8 ? _sdq_chunk_data_vec_T_18 : _T_93 == 5'h8 ? _sdq_chunk_data_vec_T_17 : _T_88 == 5'h8 ? _sdq_chunk_data_vec_T_16 : _T_83 == 5'h8 ? _sdq_chunk_data_vec_T_15 : _T_78 == 5'h8 ? _sdq_chunk_data_vec_T_14 : _T_73 == 5'h8 ? _sdq_chunk_data_vec_T_13 : _T_68 == 5'h8 ? _sdq_chunk_data_vec_T_12 : _T_63 == 5'h8 ? _sdq_chunk_data_vec_T_11 : _T_58 == 5'h8 ? _sdq_chunk_data_vec_T_10 : _T_53 == 5'h8 ? _sdq_chunk_data_vec_T_9 : _T_48 == 5'h8 ? _sdq_chunk_data_vec_T_8 : _T_43 == 5'h8 ? _sdq_chunk_data_vec_T_7 : _T_38 == 5'h8 ? _sdq_chunk_data_vec_T_6 : _T_33 == 5'h8 ? _sdq_chunk_data_vec_T_5 : _T_28 == 5'h8 ? _sdq_chunk_data_vec_T_4 : _T_23 == 5'h8 ? _sdq_chunk_data_vec_T_3 : _T_18 == 5'h8 ? _sdq_chunk_data_vec_T_2 : _T_13 == 5'h8 ? _sdq_chunk_data_vec_T_1 : _T_161 == 5'h8 ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
assign sdq_chunk_data_vec_9 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] == 5'h9 ? _sdq_chunk_data_vec_T_31 : _T_158 == 5'h9 ? _sdq_chunk_data_vec_T_30 : _T_153 == 5'h9 ? _sdq_chunk_data_vec_T_29 : _T_148 == 5'h9 ? _sdq_chunk_data_vec_T_28 : _T_143 == 5'h9 ? _sdq_chunk_data_vec_T_27 : _T_138 == 5'h9 ? _sdq_chunk_data_vec_T_26 : _T_133 == 5'h9 ? _sdq_chunk_data_vec_T_25 : _T_128 == 5'h9 ? _sdq_chunk_data_vec_T_24 : _T_123 == 5'h9 ? _sdq_chunk_data_vec_T_23 : _T_118 == 5'h9 ? _sdq_chunk_data_vec_T_22 : _T_113 == 5'h9 ? _sdq_chunk_data_vec_T_21 : _T_108 == 5'h9 ? _sdq_chunk_data_vec_T_20 : _T_103 == 5'h9 ? _sdq_chunk_data_vec_T_19 : _T_98 == 5'h9 ? _sdq_chunk_data_vec_T_18 : _T_93 == 5'h9 ? _sdq_chunk_data_vec_T_17 : _T_88 == 5'h9 ? _sdq_chunk_data_vec_T_16 : _T_83 == 5'h9 ? _sdq_chunk_data_vec_T_15 : _T_78 == 5'h9 ? _sdq_chunk_data_vec_T_14 : _T_73 == 5'h9 ? _sdq_chunk_data_vec_T_13 : _T_68 == 5'h9 ? _sdq_chunk_data_vec_T_12 : _T_63 == 5'h9 ? _sdq_chunk_data_vec_T_11 : _T_58 == 5'h9 ? _sdq_chunk_data_vec_T_10 : _T_53 == 5'h9 ? _sdq_chunk_data_vec_T_9 : _T_48 == 5'h9 ? _sdq_chunk_data_vec_T_8 : _T_43 == 5'h9 ? _sdq_chunk_data_vec_T_7 : _T_38 == 5'h9 ? _sdq_chunk_data_vec_T_6 : _T_33 == 5'h9 ? _sdq_chunk_data_vec_T_5 : _T_28 == 5'h9 ? _sdq_chunk_data_vec_T_4 : _T_23 == 5'h9 ? _sdq_chunk_data_vec_T_3 : _T_18 == 5'h9 ? _sdq_chunk_data_vec_T_2 : _T_13 == 5'h9 ? _sdq_chunk_data_vec_T_1 : _T_161 == 5'h9 ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
assign sdq_chunk_data_vec_10 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] == 5'hA ? _sdq_chunk_data_vec_T_31 : _T_158 == 5'hA ? _sdq_chunk_data_vec_T_30 : _T_153 == 5'hA ? _sdq_chunk_data_vec_T_29 : _T_148 == 5'hA ? _sdq_chunk_data_vec_T_28 : _T_143 == 5'hA ? _sdq_chunk_data_vec_T_27 : _T_138 == 5'hA ? _sdq_chunk_data_vec_T_26 : _T_133 == 5'hA ? _sdq_chunk_data_vec_T_25 : _T_128 == 5'hA ? _sdq_chunk_data_vec_T_24 : _T_123 == 5'hA ? _sdq_chunk_data_vec_T_23 : _T_118 == 5'hA ? _sdq_chunk_data_vec_T_22 : _T_113 == 5'hA ? _sdq_chunk_data_vec_T_21 : _T_108 == 5'hA ? _sdq_chunk_data_vec_T_20 : _T_103 == 5'hA ? _sdq_chunk_data_vec_T_19 : _T_98 == 5'hA ? _sdq_chunk_data_vec_T_18 : _T_93 == 5'hA ? _sdq_chunk_data_vec_T_17 : _T_88 == 5'hA ? _sdq_chunk_data_vec_T_16 : _T_83 == 5'hA ? _sdq_chunk_data_vec_T_15 : _T_78 == 5'hA ? _sdq_chunk_data_vec_T_14 : _T_73 == 5'hA ? _sdq_chunk_data_vec_T_13 : _T_68 == 5'hA ? _sdq_chunk_data_vec_T_12 : _T_63 == 5'hA ? _sdq_chunk_data_vec_T_11 : _T_58 == 5'hA ? _sdq_chunk_data_vec_T_10 : _T_53 == 5'hA ? _sdq_chunk_data_vec_T_9 : _T_48 == 5'hA ? _sdq_chunk_data_vec_T_8 : _T_43 == 5'hA ? _sdq_chunk_data_vec_T_7 : _T_38 == 5'hA ? _sdq_chunk_data_vec_T_6 : _T_33 == 5'hA ? _sdq_chunk_data_vec_T_5 : _T_28 == 5'hA ? _sdq_chunk_data_vec_T_4 : _T_23 == 5'hA ? _sdq_chunk_data_vec_T_3 : _T_18 == 5'hA ? _sdq_chunk_data_vec_T_2 : _T_13 == 5'hA ? _sdq_chunk_data_vec_T_1 : _T_161 == 5'hA ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
assign sdq_chunk_data_vec_11 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] == 5'hB ? _sdq_chunk_data_vec_T_31 : _T_158 == 5'hB ? _sdq_chunk_data_vec_T_30 : _T_153 == 5'hB ? _sdq_chunk_data_vec_T_29 : _T_148 == 5'hB ? _sdq_chunk_data_vec_T_28 : _T_143 == 5'hB ? _sdq_chunk_data_vec_T_27 : _T_138 == 5'hB ? _sdq_chunk_data_vec_T_26 : _T_133 == 5'hB ? _sdq_chunk_data_vec_T_25 : _T_128 == 5'hB ? _sdq_chunk_data_vec_T_24 : _T_123 == 5'hB ? _sdq_chunk_data_vec_T_23 : _T_118 == 5'hB ? _sdq_chunk_data_vec_T_22 : _T_113 == 5'hB ? _sdq_chunk_data_vec_T_21 : _T_108 == 5'hB ? _sdq_chunk_data_vec_T_20 : _T_103 == 5'hB ? _sdq_chunk_data_vec_T_19 : _T_98 == 5'hB ? _sdq_chunk_data_vec_T_18 : _T_93 == 5'hB ? _sdq_chunk_data_vec_T_17 : _T_88 == 5'hB ? _sdq_chunk_data_vec_T_16 : _T_83 == 5'hB ? _sdq_chunk_data_vec_T_15 : _T_78 == 5'hB ? _sdq_chunk_data_vec_T_14 : _T_73 == 5'hB ? _sdq_chunk_data_vec_T_13 : _T_68 == 5'hB ? _sdq_chunk_data_vec_T_12 : _T_63 == 5'hB ? _sdq_chunk_data_vec_T_11 : _T_58 == 5'hB ? _sdq_chunk_data_vec_T_10 : _T_53 == 5'hB ? _sdq_chunk_data_vec_T_9 : _T_48 == 5'hB ? _sdq_chunk_data_vec_T_8 : _T_43 == 5'hB ? _sdq_chunk_data_vec_T_7 : _T_38 == 5'hB ? _sdq_chunk_data_vec_T_6 : _T_33 == 5'hB ? _sdq_chunk_data_vec_T_5 : _T_28 == 5'hB ? _sdq_chunk_data_vec_T_4 : _T_23 == 5'hB ? _sdq_chunk_data_vec_T_3 : _T_18 == 5'hB ? _sdq_chunk_data_vec_T_2 : _T_13 == 5'hB ? _sdq_chunk_data_vec_T_1 : _T_161 == 5'hB ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
assign sdq_chunk_data_vec_12 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] == 5'hC ? _sdq_chunk_data_vec_T_31 : _T_158 == 5'hC ? _sdq_chunk_data_vec_T_30 : _T_153 == 5'hC ? _sdq_chunk_data_vec_T_29 : _T_148 == 5'hC ? _sdq_chunk_data_vec_T_28 : _T_143 == 5'hC ? _sdq_chunk_data_vec_T_27 : _T_138 == 5'hC ? _sdq_chunk_data_vec_T_26 : _T_133 == 5'hC ? _sdq_chunk_data_vec_T_25 : _T_128 == 5'hC ? _sdq_chunk_data_vec_T_24 : _T_123 == 5'hC ? _sdq_chunk_data_vec_T_23 : _T_118 == 5'hC ? _sdq_chunk_data_vec_T_22 : _T_113 == 5'hC ? _sdq_chunk_data_vec_T_21 : _T_108 == 5'hC ? _sdq_chunk_data_vec_T_20 : _T_103 == 5'hC ? _sdq_chunk_data_vec_T_19 : _T_98 == 5'hC ? _sdq_chunk_data_vec_T_18 : _T_93 == 5'hC ? _sdq_chunk_data_vec_T_17 : _T_88 == 5'hC ? _sdq_chunk_data_vec_T_16 : _T_83 == 5'hC ? _sdq_chunk_data_vec_T_15 : _T_78 == 5'hC ? _sdq_chunk_data_vec_T_14 : _T_73 == 5'hC ? _sdq_chunk_data_vec_T_13 : _T_68 == 5'hC ? _sdq_chunk_data_vec_T_12 : _T_63 == 5'hC ? _sdq_chunk_data_vec_T_11 : _T_58 == 5'hC ? _sdq_chunk_data_vec_T_10 : _T_53 == 5'hC ? _sdq_chunk_data_vec_T_9 : _T_48 == 5'hC ? _sdq_chunk_data_vec_T_8 : _T_43 == 5'hC ? _sdq_chunk_data_vec_T_7 : _T_38 == 5'hC ? _sdq_chunk_data_vec_T_6 : _T_33 == 5'hC ? _sdq_chunk_data_vec_T_5 : _T_28 == 5'hC ? _sdq_chunk_data_vec_T_4 : _T_23 == 5'hC ? _sdq_chunk_data_vec_T_3 : _T_18 == 5'hC ? _sdq_chunk_data_vec_T_2 : _T_13 == 5'hC ? _sdq_chunk_data_vec_T_1 : _T_161 == 5'hC ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
assign sdq_chunk_data_vec_13 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] == 5'hD ? _sdq_chunk_data_vec_T_31 : _T_158 == 5'hD ? _sdq_chunk_data_vec_T_30 : _T_153 == 5'hD ? _sdq_chunk_data_vec_T_29 : _T_148 == 5'hD ? _sdq_chunk_data_vec_T_28 : _T_143 == 5'hD ? _sdq_chunk_data_vec_T_27 : _T_138 == 5'hD ? _sdq_chunk_data_vec_T_26 : _T_133 == 5'hD ? _sdq_chunk_data_vec_T_25 : _T_128 == 5'hD ? _sdq_chunk_data_vec_T_24 : _T_123 == 5'hD ? _sdq_chunk_data_vec_T_23 : _T_118 == 5'hD ? _sdq_chunk_data_vec_T_22 : _T_113 == 5'hD ? _sdq_chunk_data_vec_T_21 : _T_108 == 5'hD ? _sdq_chunk_data_vec_T_20 : _T_103 == 5'hD ? _sdq_chunk_data_vec_T_19 : _T_98 == 5'hD ? _sdq_chunk_data_vec_T_18 : _T_93 == 5'hD ? _sdq_chunk_data_vec_T_17 : _T_88 == 5'hD ? _sdq_chunk_data_vec_T_16 : _T_83 == 5'hD ? _sdq_chunk_data_vec_T_15 : _T_78 == 5'hD ? _sdq_chunk_data_vec_T_14 : _T_73 == 5'hD ? _sdq_chunk_data_vec_T_13 : _T_68 == 5'hD ? _sdq_chunk_data_vec_T_12 : _T_63 == 5'hD ? _sdq_chunk_data_vec_T_11 : _T_58 == 5'hD ? _sdq_chunk_data_vec_T_10 : _T_53 == 5'hD ? _sdq_chunk_data_vec_T_9 : _T_48 == 5'hD ? _sdq_chunk_data_vec_T_8 : _T_43 == 5'hD ? _sdq_chunk_data_vec_T_7 : _T_38 == 5'hD ? _sdq_chunk_data_vec_T_6 : _T_33 == 5'hD ? _sdq_chunk_data_vec_T_5 : _T_28 == 5'hD ? _sdq_chunk_data_vec_T_4 : _T_23 == 5'hD ? _sdq_chunk_data_vec_T_3 : _T_18 == 5'hD ? _sdq_chunk_data_vec_T_2 : _T_13 == 5'hD ? _sdq_chunk_data_vec_T_1 : _T_161 == 5'hD ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
assign sdq_chunk_data_vec_14 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] == 5'hE ? _sdq_chunk_data_vec_T_31 : _T_158 == 5'hE ? _sdq_chunk_data_vec_T_30 : _T_153 == 5'hE ? _sdq_chunk_data_vec_T_29 : _T_148 == 5'hE ? _sdq_chunk_data_vec_T_28 : _T_143 == 5'hE ? _sdq_chunk_data_vec_T_27 : _T_138 == 5'hE ? _sdq_chunk_data_vec_T_26 : _T_133 == 5'hE ? _sdq_chunk_data_vec_T_25 : _T_128 == 5'hE ? _sdq_chunk_data_vec_T_24 : _T_123 == 5'hE ? _sdq_chunk_data_vec_T_23 : _T_118 == 5'hE ? _sdq_chunk_data_vec_T_22 : _T_113 == 5'hE ? _sdq_chunk_data_vec_T_21 : _T_108 == 5'hE ? _sdq_chunk_data_vec_T_20 : _T_103 == 5'hE ? _sdq_chunk_data_vec_T_19 : _T_98 == 5'hE ? _sdq_chunk_data_vec_T_18 : _T_93 == 5'hE ? _sdq_chunk_data_vec_T_17 : _T_88 == 5'hE ? _sdq_chunk_data_vec_T_16 : _T_83 == 5'hE ? _sdq_chunk_data_vec_T_15 : _T_78 == 5'hE ? _sdq_chunk_data_vec_T_14 : _T_73 == 5'hE ? _sdq_chunk_data_vec_T_13 : _T_68 == 5'hE ? _sdq_chunk_data_vec_T_12 : _T_63 == 5'hE ? _sdq_chunk_data_vec_T_11 : _T_58 == 5'hE ? _sdq_chunk_data_vec_T_10 : _T_53 == 5'hE ? _sdq_chunk_data_vec_T_9 : _T_48 == 5'hE ? _sdq_chunk_data_vec_T_8 : _T_43 == 5'hE ? _sdq_chunk_data_vec_T_7 : _T_38 == 5'hE ? _sdq_chunk_data_vec_T_6 : _T_33 == 5'hE ? _sdq_chunk_data_vec_T_5 : _T_28 == 5'hE ? _sdq_chunk_data_vec_T_4 : _T_23 == 5'hE ? _sdq_chunk_data_vec_T_3 : _T_18 == 5'hE ? _sdq_chunk_data_vec_T_2 : _T_13 == 5'hE ? _sdq_chunk_data_vec_T_1 : _T_161 == 5'hE ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
assign sdq_chunk_data_vec_15 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] == 5'hF ? _sdq_chunk_data_vec_T_31 : _T_158 == 5'hF ? _sdq_chunk_data_vec_T_30 : _T_153 == 5'hF ? _sdq_chunk_data_vec_T_29 : _T_148 == 5'hF ? _sdq_chunk_data_vec_T_28 : _T_143 == 5'hF ? _sdq_chunk_data_vec_T_27 : _T_138 == 5'hF ? _sdq_chunk_data_vec_T_26 : _T_133 == 5'hF ? _sdq_chunk_data_vec_T_25 : _T_128 == 5'hF ? _sdq_chunk_data_vec_T_24 : _T_123 == 5'hF ? _sdq_chunk_data_vec_T_23 : _T_118 == 5'hF ? _sdq_chunk_data_vec_T_22 : _T_113 == 5'hF ? _sdq_chunk_data_vec_T_21 : _T_108 == 5'hF ? _sdq_chunk_data_vec_T_20 : _T_103 == 5'hF ? _sdq_chunk_data_vec_T_19 : _T_98 == 5'hF ? _sdq_chunk_data_vec_T_18 : _T_93 == 5'hF ? _sdq_chunk_data_vec_T_17 : _T_88 == 5'hF ? _sdq_chunk_data_vec_T_16 : _T_83 == 5'hF ? _sdq_chunk_data_vec_T_15 : _T_78 == 5'hF ? _sdq_chunk_data_vec_T_14 : _T_73 == 5'hF ? _sdq_chunk_data_vec_T_13 : _T_68 == 5'hF ? _sdq_chunk_data_vec_T_12 : _T_63 == 5'hF ? _sdq_chunk_data_vec_T_11 : _T_58 == 5'hF ? _sdq_chunk_data_vec_T_10 : _T_53 == 5'hF ? _sdq_chunk_data_vec_T_9 : _T_48 == 5'hF ? _sdq_chunk_data_vec_T_8 : _T_43 == 5'hF ? _sdq_chunk_data_vec_T_7 : _T_38 == 5'hF ? _sdq_chunk_data_vec_T_6 : _T_33 == 5'hF ? _sdq_chunk_data_vec_T_5 : _T_28 == 5'hF ? _sdq_chunk_data_vec_T_4 : _T_23 == 5'hF ? _sdq_chunk_data_vec_T_3 : _T_18 == 5'hF ? _sdq_chunk_data_vec_T_2 : _T_13 == 5'hF ? _sdq_chunk_data_vec_T_1 : _T_161 == 5'hF ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
assign sdq_chunk_data_vec_16 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] == 5'h10 ? _sdq_chunk_data_vec_T_31 : _T_158 == 5'h10 ? _sdq_chunk_data_vec_T_30 : _T_153 == 5'h10 ? _sdq_chunk_data_vec_T_29 : _T_148 == 5'h10 ? _sdq_chunk_data_vec_T_28 : _T_143 == 5'h10 ? _sdq_chunk_data_vec_T_27 : _T_138 == 5'h10 ? _sdq_chunk_data_vec_T_26 : _T_133 == 5'h10 ? _sdq_chunk_data_vec_T_25 : _T_128 == 5'h10 ? _sdq_chunk_data_vec_T_24 : _T_123 == 5'h10 ? _sdq_chunk_data_vec_T_23 : _T_118 == 5'h10 ? _sdq_chunk_data_vec_T_22 : _T_113 == 5'h10 ? _sdq_chunk_data_vec_T_21 : _T_108 == 5'h10 ? _sdq_chunk_data_vec_T_20 : _T_103 == 5'h10 ? _sdq_chunk_data_vec_T_19 : _T_98 == 5'h10 ? _sdq_chunk_data_vec_T_18 : _T_93 == 5'h10 ? _sdq_chunk_data_vec_T_17 : _T_88 == 5'h10 ? _sdq_chunk_data_vec_T_16 : _T_83 == 5'h10 ? _sdq_chunk_data_vec_T_15 : _T_78 == 5'h10 ? _sdq_chunk_data_vec_T_14 : _T_73 == 5'h10 ? _sdq_chunk_data_vec_T_13 : _T_68 == 5'h10 ? _sdq_chunk_data_vec_T_12 : _T_63 == 5'h10 ? _sdq_chunk_data_vec_T_11 : _T_58 == 5'h10 ? _sdq_chunk_data_vec_T_10 : _T_53 == 5'h10 ? _sdq_chunk_data_vec_T_9 : _T_48 == 5'h10 ? _sdq_chunk_data_vec_T_8 : _T_43 == 5'h10 ? _sdq_chunk_data_vec_T_7 : _T_38 == 5'h10 ? _sdq_chunk_data_vec_T_6 : _T_33 == 5'h10 ? _sdq_chunk_data_vec_T_5 : _T_28 == 5'h10 ? _sdq_chunk_data_vec_T_4 : _T_23 == 5'h10 ? _sdq_chunk_data_vec_T_3 : _T_18 == 5'h10 ? _sdq_chunk_data_vec_T_2 : _T_13 == 5'h10 ? _sdq_chunk_data_vec_T_1 : _T_161 == 5'h10 ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
assign sdq_chunk_data_vec_17 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] == 5'h11 ? _sdq_chunk_data_vec_T_31 : _T_158 == 5'h11 ? _sdq_chunk_data_vec_T_30 : _T_153 == 5'h11 ? _sdq_chunk_data_vec_T_29 : _T_148 == 5'h11 ? _sdq_chunk_data_vec_T_28 : _T_143 == 5'h11 ? _sdq_chunk_data_vec_T_27 : _T_138 == 5'h11 ? _sdq_chunk_data_vec_T_26 : _T_133 == 5'h11 ? _sdq_chunk_data_vec_T_25 : _T_128 == 5'h11 ? _sdq_chunk_data_vec_T_24 : _T_123 == 5'h11 ? _sdq_chunk_data_vec_T_23 : _T_118 == 5'h11 ? _sdq_chunk_data_vec_T_22 : _T_113 == 5'h11 ? _sdq_chunk_data_vec_T_21 : _T_108 == 5'h11 ? _sdq_chunk_data_vec_T_20 : _T_103 == 5'h11 ? _sdq_chunk_data_vec_T_19 : _T_98 == 5'h11 ? _sdq_chunk_data_vec_T_18 : _T_93 == 5'h11 ? _sdq_chunk_data_vec_T_17 : _T_88 == 5'h11 ? _sdq_chunk_data_vec_T_16 : _T_83 == 5'h11 ? _sdq_chunk_data_vec_T_15 : _T_78 == 5'h11 ? _sdq_chunk_data_vec_T_14 : _T_73 == 5'h11 ? _sdq_chunk_data_vec_T_13 : _T_68 == 5'h11 ? _sdq_chunk_data_vec_T_12 : _T_63 == 5'h11 ? _sdq_chunk_data_vec_T_11 : _T_58 == 5'h11 ? _sdq_chunk_data_vec_T_10 : _T_53 == 5'h11 ? _sdq_chunk_data_vec_T_9 : _T_48 == 5'h11 ? _sdq_chunk_data_vec_T_8 : _T_43 == 5'h11 ? _sdq_chunk_data_vec_T_7 : _T_38 == 5'h11 ? _sdq_chunk_data_vec_T_6 : _T_33 == 5'h11 ? _sdq_chunk_data_vec_T_5 : _T_28 == 5'h11 ? _sdq_chunk_data_vec_T_4 : _T_23 == 5'h11 ? _sdq_chunk_data_vec_T_3 : _T_18 == 5'h11 ? _sdq_chunk_data_vec_T_2 : _T_13 == 5'h11 ? _sdq_chunk_data_vec_T_1 : _T_161 == 5'h11 ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
assign sdq_chunk_data_vec_18 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] == 5'h12 ? _sdq_chunk_data_vec_T_31 : _T_158 == 5'h12 ? _sdq_chunk_data_vec_T_30 : _T_153 == 5'h12 ? _sdq_chunk_data_vec_T_29 : _T_148 == 5'h12 ? _sdq_chunk_data_vec_T_28 : _T_143 == 5'h12 ? _sdq_chunk_data_vec_T_27 : _T_138 == 5'h12 ? _sdq_chunk_data_vec_T_26 : _T_133 == 5'h12 ? _sdq_chunk_data_vec_T_25 : _T_128 == 5'h12 ? _sdq_chunk_data_vec_T_24 : _T_123 == 5'h12 ? _sdq_chunk_data_vec_T_23 : _T_118 == 5'h12 ? _sdq_chunk_data_vec_T_22 : _T_113 == 5'h12 ? _sdq_chunk_data_vec_T_21 : _T_108 == 5'h12 ? _sdq_chunk_data_vec_T_20 : _T_103 == 5'h12 ? _sdq_chunk_data_vec_T_19 : _T_98 == 5'h12 ? _sdq_chunk_data_vec_T_18 : _T_93 == 5'h12 ? _sdq_chunk_data_vec_T_17 : _T_88 == 5'h12 ? _sdq_chunk_data_vec_T_16 : _T_83 == 5'h12 ? _sdq_chunk_data_vec_T_15 : _T_78 == 5'h12 ? _sdq_chunk_data_vec_T_14 : _T_73 == 5'h12 ? _sdq_chunk_data_vec_T_13 : _T_68 == 5'h12 ? _sdq_chunk_data_vec_T_12 : _T_63 == 5'h12 ? _sdq_chunk_data_vec_T_11 : _T_58 == 5'h12 ? _sdq_chunk_data_vec_T_10 : _T_53 == 5'h12 ? _sdq_chunk_data_vec_T_9 : _T_48 == 5'h12 ? _sdq_chunk_data_vec_T_8 : _T_43 == 5'h12 ? _sdq_chunk_data_vec_T_7 : _T_38 == 5'h12 ? _sdq_chunk_data_vec_T_6 : _T_33 == 5'h12 ? _sdq_chunk_data_vec_T_5 : _T_28 == 5'h12 ? _sdq_chunk_data_vec_T_4 : _T_23 == 5'h12 ? _sdq_chunk_data_vec_T_3 : _T_18 == 5'h12 ? _sdq_chunk_data_vec_T_2 : _T_13 == 5'h12 ? _sdq_chunk_data_vec_T_1 : _T_161 == 5'h12 ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
assign sdq_chunk_data_vec_19 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] == 5'h13 ? _sdq_chunk_data_vec_T_31 : _T_158 == 5'h13 ? _sdq_chunk_data_vec_T_30 : _T_153 == 5'h13 ? _sdq_chunk_data_vec_T_29 : _T_148 == 5'h13 ? _sdq_chunk_data_vec_T_28 : _T_143 == 5'h13 ? _sdq_chunk_data_vec_T_27 : _T_138 == 5'h13 ? _sdq_chunk_data_vec_T_26 : _T_133 == 5'h13 ? _sdq_chunk_data_vec_T_25 : _T_128 == 5'h13 ? _sdq_chunk_data_vec_T_24 : _T_123 == 5'h13 ? _sdq_chunk_data_vec_T_23 : _T_118 == 5'h13 ? _sdq_chunk_data_vec_T_22 : _T_113 == 5'h13 ? _sdq_chunk_data_vec_T_21 : _T_108 == 5'h13 ? _sdq_chunk_data_vec_T_20 : _T_103 == 5'h13 ? _sdq_chunk_data_vec_T_19 : _T_98 == 5'h13 ? _sdq_chunk_data_vec_T_18 : _T_93 == 5'h13 ? _sdq_chunk_data_vec_T_17 : _T_88 == 5'h13 ? _sdq_chunk_data_vec_T_16 : _T_83 == 5'h13 ? _sdq_chunk_data_vec_T_15 : _T_78 == 5'h13 ? _sdq_chunk_data_vec_T_14 : _T_73 == 5'h13 ? _sdq_chunk_data_vec_T_13 : _T_68 == 5'h13 ? _sdq_chunk_data_vec_T_12 : _T_63 == 5'h13 ? _sdq_chunk_data_vec_T_11 : _T_58 == 5'h13 ? _sdq_chunk_data_vec_T_10 : _T_53 == 5'h13 ? _sdq_chunk_data_vec_T_9 : _T_48 == 5'h13 ? _sdq_chunk_data_vec_T_8 : _T_43 == 5'h13 ? _sdq_chunk_data_vec_T_7 : _T_38 == 5'h13 ? _sdq_chunk_data_vec_T_6 : _T_33 == 5'h13 ? _sdq_chunk_data_vec_T_5 : _T_28 == 5'h13 ? _sdq_chunk_data_vec_T_4 : _T_23 == 5'h13 ? _sdq_chunk_data_vec_T_3 : _T_18 == 5'h13 ? _sdq_chunk_data_vec_T_2 : _T_13 == 5'h13 ? _sdq_chunk_data_vec_T_1 : _T_161 == 5'h13 ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
assign sdq_chunk_data_vec_20 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] == 5'h14 ? _sdq_chunk_data_vec_T_31 : _T_158 == 5'h14 ? _sdq_chunk_data_vec_T_30 : _T_153 == 5'h14 ? _sdq_chunk_data_vec_T_29 : _T_148 == 5'h14 ? _sdq_chunk_data_vec_T_28 : _T_143 == 5'h14 ? _sdq_chunk_data_vec_T_27 : _T_138 == 5'h14 ? _sdq_chunk_data_vec_T_26 : _T_133 == 5'h14 ? _sdq_chunk_data_vec_T_25 : _T_128 == 5'h14 ? _sdq_chunk_data_vec_T_24 : _T_123 == 5'h14 ? _sdq_chunk_data_vec_T_23 : _T_118 == 5'h14 ? _sdq_chunk_data_vec_T_22 : _T_113 == 5'h14 ? _sdq_chunk_data_vec_T_21 : _T_108 == 5'h14 ? _sdq_chunk_data_vec_T_20 : _T_103 == 5'h14 ? _sdq_chunk_data_vec_T_19 : _T_98 == 5'h14 ? _sdq_chunk_data_vec_T_18 : _T_93 == 5'h14 ? _sdq_chunk_data_vec_T_17 : _T_88 == 5'h14 ? _sdq_chunk_data_vec_T_16 : _T_83 == 5'h14 ? _sdq_chunk_data_vec_T_15 : _T_78 == 5'h14 ? _sdq_chunk_data_vec_T_14 : _T_73 == 5'h14 ? _sdq_chunk_data_vec_T_13 : _T_68 == 5'h14 ? _sdq_chunk_data_vec_T_12 : _T_63 == 5'h14 ? _sdq_chunk_data_vec_T_11 : _T_58 == 5'h14 ? _sdq_chunk_data_vec_T_10 : _T_53 == 5'h14 ? _sdq_chunk_data_vec_T_9 : _T_48 == 5'h14 ? _sdq_chunk_data_vec_T_8 : _T_43 == 5'h14 ? _sdq_chunk_data_vec_T_7 : _T_38 == 5'h14 ? _sdq_chunk_data_vec_T_6 : _T_33 == 5'h14 ? _sdq_chunk_data_vec_T_5 : _T_28 == 5'h14 ? _sdq_chunk_data_vec_T_4 : _T_23 == 5'h14 ? _sdq_chunk_data_vec_T_3 : _T_18 == 5'h14 ? _sdq_chunk_data_vec_T_2 : _T_13 == 5'h14 ? _sdq_chunk_data_vec_T_1 : _T_161 == 5'h14 ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
assign sdq_chunk_data_vec_21 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] == 5'h15 ? _sdq_chunk_data_vec_T_31 : _T_158 == 5'h15 ? _sdq_chunk_data_vec_T_30 : _T_153 == 5'h15 ? _sdq_chunk_data_vec_T_29 : _T_148 == 5'h15 ? _sdq_chunk_data_vec_T_28 : _T_143 == 5'h15 ? _sdq_chunk_data_vec_T_27 : _T_138 == 5'h15 ? _sdq_chunk_data_vec_T_26 : _T_133 == 5'h15 ? _sdq_chunk_data_vec_T_25 : _T_128 == 5'h15 ? _sdq_chunk_data_vec_T_24 : _T_123 == 5'h15 ? _sdq_chunk_data_vec_T_23 : _T_118 == 5'h15 ? _sdq_chunk_data_vec_T_22 : _T_113 == 5'h15 ? _sdq_chunk_data_vec_T_21 : _T_108 == 5'h15 ? _sdq_chunk_data_vec_T_20 : _T_103 == 5'h15 ? _sdq_chunk_data_vec_T_19 : _T_98 == 5'h15 ? _sdq_chunk_data_vec_T_18 : _T_93 == 5'h15 ? _sdq_chunk_data_vec_T_17 : _T_88 == 5'h15 ? _sdq_chunk_data_vec_T_16 : _T_83 == 5'h15 ? _sdq_chunk_data_vec_T_15 : _T_78 == 5'h15 ? _sdq_chunk_data_vec_T_14 : _T_73 == 5'h15 ? _sdq_chunk_data_vec_T_13 : _T_68 == 5'h15 ? _sdq_chunk_data_vec_T_12 : _T_63 == 5'h15 ? _sdq_chunk_data_vec_T_11 : _T_58 == 5'h15 ? _sdq_chunk_data_vec_T_10 : _T_53 == 5'h15 ? _sdq_chunk_data_vec_T_9 : _T_48 == 5'h15 ? _sdq_chunk_data_vec_T_8 : _T_43 == 5'h15 ? _sdq_chunk_data_vec_T_7 : _T_38 == 5'h15 ? _sdq_chunk_data_vec_T_6 : _T_33 == 5'h15 ? _sdq_chunk_data_vec_T_5 : _T_28 == 5'h15 ? _sdq_chunk_data_vec_T_4 : _T_23 == 5'h15 ? _sdq_chunk_data_vec_T_3 : _T_18 == 5'h15 ? _sdq_chunk_data_vec_T_2 : _T_13 == 5'h15 ? _sdq_chunk_data_vec_T_1 : _T_161 == 5'h15 ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
assign sdq_chunk_data_vec_22 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] == 5'h16 ? _sdq_chunk_data_vec_T_31 : _T_158 == 5'h16 ? _sdq_chunk_data_vec_T_30 : _T_153 == 5'h16 ? _sdq_chunk_data_vec_T_29 : _T_148 == 5'h16 ? _sdq_chunk_data_vec_T_28 : _T_143 == 5'h16 ? _sdq_chunk_data_vec_T_27 : _T_138 == 5'h16 ? _sdq_chunk_data_vec_T_26 : _T_133 == 5'h16 ? _sdq_chunk_data_vec_T_25 : _T_128 == 5'h16 ? _sdq_chunk_data_vec_T_24 : _T_123 == 5'h16 ? _sdq_chunk_data_vec_T_23 : _T_118 == 5'h16 ? _sdq_chunk_data_vec_T_22 : _T_113 == 5'h16 ? _sdq_chunk_data_vec_T_21 : _T_108 == 5'h16 ? _sdq_chunk_data_vec_T_20 : _T_103 == 5'h16 ? _sdq_chunk_data_vec_T_19 : _T_98 == 5'h16 ? _sdq_chunk_data_vec_T_18 : _T_93 == 5'h16 ? _sdq_chunk_data_vec_T_17 : _T_88 == 5'h16 ? _sdq_chunk_data_vec_T_16 : _T_83 == 5'h16 ? _sdq_chunk_data_vec_T_15 : _T_78 == 5'h16 ? _sdq_chunk_data_vec_T_14 : _T_73 == 5'h16 ? _sdq_chunk_data_vec_T_13 : _T_68 == 5'h16 ? _sdq_chunk_data_vec_T_12 : _T_63 == 5'h16 ? _sdq_chunk_data_vec_T_11 : _T_58 == 5'h16 ? _sdq_chunk_data_vec_T_10 : _T_53 == 5'h16 ? _sdq_chunk_data_vec_T_9 : _T_48 == 5'h16 ? _sdq_chunk_data_vec_T_8 : _T_43 == 5'h16 ? _sdq_chunk_data_vec_T_7 : _T_38 == 5'h16 ? _sdq_chunk_data_vec_T_6 : _T_33 == 5'h16 ? _sdq_chunk_data_vec_T_5 : _T_28 == 5'h16 ? _sdq_chunk_data_vec_T_4 : _T_23 == 5'h16 ? _sdq_chunk_data_vec_T_3 : _T_18 == 5'h16 ? _sdq_chunk_data_vec_T_2 : _T_13 == 5'h16 ? _sdq_chunk_data_vec_T_1 : _T_161 == 5'h16 ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
assign sdq_chunk_data_vec_23 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] == 5'h17 ? _sdq_chunk_data_vec_T_31 : _T_158 == 5'h17 ? _sdq_chunk_data_vec_T_30 : _T_153 == 5'h17 ? _sdq_chunk_data_vec_T_29 : _T_148 == 5'h17 ? _sdq_chunk_data_vec_T_28 : _T_143 == 5'h17 ? _sdq_chunk_data_vec_T_27 : _T_138 == 5'h17 ? _sdq_chunk_data_vec_T_26 : _T_133 == 5'h17 ? _sdq_chunk_data_vec_T_25 : _T_128 == 5'h17 ? _sdq_chunk_data_vec_T_24 : _T_123 == 5'h17 ? _sdq_chunk_data_vec_T_23 : _T_118 == 5'h17 ? _sdq_chunk_data_vec_T_22 : _T_113 == 5'h17 ? _sdq_chunk_data_vec_T_21 : _T_108 == 5'h17 ? _sdq_chunk_data_vec_T_20 : _T_103 == 5'h17 ? _sdq_chunk_data_vec_T_19 : _T_98 == 5'h17 ? _sdq_chunk_data_vec_T_18 : _T_93 == 5'h17 ? _sdq_chunk_data_vec_T_17 : _T_88 == 5'h17 ? _sdq_chunk_data_vec_T_16 : _T_83 == 5'h17 ? _sdq_chunk_data_vec_T_15 : _T_78 == 5'h17 ? _sdq_chunk_data_vec_T_14 : _T_73 == 5'h17 ? _sdq_chunk_data_vec_T_13 : _T_68 == 5'h17 ? _sdq_chunk_data_vec_T_12 : _T_63 == 5'h17 ? _sdq_chunk_data_vec_T_11 : _T_58 == 5'h17 ? _sdq_chunk_data_vec_T_10 : _T_53 == 5'h17 ? _sdq_chunk_data_vec_T_9 : _T_48 == 5'h17 ? _sdq_chunk_data_vec_T_8 : _T_43 == 5'h17 ? _sdq_chunk_data_vec_T_7 : _T_38 == 5'h17 ? _sdq_chunk_data_vec_T_6 : _T_33 == 5'h17 ? _sdq_chunk_data_vec_T_5 : _T_28 == 5'h17 ? _sdq_chunk_data_vec_T_4 : _T_23 == 5'h17 ? _sdq_chunk_data_vec_T_3 : _T_18 == 5'h17 ? _sdq_chunk_data_vec_T_2 : _T_13 == 5'h17 ? _sdq_chunk_data_vec_T_1 : _T_161 == 5'h17 ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
assign sdq_chunk_data_vec_24 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] == 5'h18 ? _sdq_chunk_data_vec_T_31 : _T_158 == 5'h18 ? _sdq_chunk_data_vec_T_30 : _T_153 == 5'h18 ? _sdq_chunk_data_vec_T_29 : _T_148 == 5'h18 ? _sdq_chunk_data_vec_T_28 : _T_143 == 5'h18 ? _sdq_chunk_data_vec_T_27 : _T_138 == 5'h18 ? _sdq_chunk_data_vec_T_26 : _T_133 == 5'h18 ? _sdq_chunk_data_vec_T_25 : _T_128 == 5'h18 ? _sdq_chunk_data_vec_T_24 : _T_123 == 5'h18 ? _sdq_chunk_data_vec_T_23 : _T_118 == 5'h18 ? _sdq_chunk_data_vec_T_22 : _T_113 == 5'h18 ? _sdq_chunk_data_vec_T_21 : _T_108 == 5'h18 ? _sdq_chunk_data_vec_T_20 : _T_103 == 5'h18 ? _sdq_chunk_data_vec_T_19 : _T_98 == 5'h18 ? _sdq_chunk_data_vec_T_18 : _T_93 == 5'h18 ? _sdq_chunk_data_vec_T_17 : _T_88 == 5'h18 ? _sdq_chunk_data_vec_T_16 : _T_83 == 5'h18 ? _sdq_chunk_data_vec_T_15 : _T_78 == 5'h18 ? _sdq_chunk_data_vec_T_14 : _T_73 == 5'h18 ? _sdq_chunk_data_vec_T_13 : _T_68 == 5'h18 ? _sdq_chunk_data_vec_T_12 : _T_63 == 5'h18 ? _sdq_chunk_data_vec_T_11 : _T_58 == 5'h18 ? _sdq_chunk_data_vec_T_10 : _T_53 == 5'h18 ? _sdq_chunk_data_vec_T_9 : _T_48 == 5'h18 ? _sdq_chunk_data_vec_T_8 : _T_43 == 5'h18 ? _sdq_chunk_data_vec_T_7 : _T_38 == 5'h18 ? _sdq_chunk_data_vec_T_6 : _T_33 == 5'h18 ? _sdq_chunk_data_vec_T_5 : _T_28 == 5'h18 ? _sdq_chunk_data_vec_T_4 : _T_23 == 5'h18 ? _sdq_chunk_data_vec_T_3 : _T_18 == 5'h18 ? _sdq_chunk_data_vec_T_2 : _T_13 == 5'h18 ? _sdq_chunk_data_vec_T_1 : _T_161 == 5'h18 ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
assign sdq_chunk_data_vec_25 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] == 5'h19 ? _sdq_chunk_data_vec_T_31 : _T_158 == 5'h19 ? _sdq_chunk_data_vec_T_30 : _T_153 == 5'h19 ? _sdq_chunk_data_vec_T_29 : _T_148 == 5'h19 ? _sdq_chunk_data_vec_T_28 : _T_143 == 5'h19 ? _sdq_chunk_data_vec_T_27 : _T_138 == 5'h19 ? _sdq_chunk_data_vec_T_26 : _T_133 == 5'h19 ? _sdq_chunk_data_vec_T_25 : _T_128 == 5'h19 ? _sdq_chunk_data_vec_T_24 : _T_123 == 5'h19 ? _sdq_chunk_data_vec_T_23 : _T_118 == 5'h19 ? _sdq_chunk_data_vec_T_22 : _T_113 == 5'h19 ? _sdq_chunk_data_vec_T_21 : _T_108 == 5'h19 ? _sdq_chunk_data_vec_T_20 : _T_103 == 5'h19 ? _sdq_chunk_data_vec_T_19 : _T_98 == 5'h19 ? _sdq_chunk_data_vec_T_18 : _T_93 == 5'h19 ? _sdq_chunk_data_vec_T_17 : _T_88 == 5'h19 ? _sdq_chunk_data_vec_T_16 : _T_83 == 5'h19 ? _sdq_chunk_data_vec_T_15 : _T_78 == 5'h19 ? _sdq_chunk_data_vec_T_14 : _T_73 == 5'h19 ? _sdq_chunk_data_vec_T_13 : _T_68 == 5'h19 ? _sdq_chunk_data_vec_T_12 : _T_63 == 5'h19 ? _sdq_chunk_data_vec_T_11 : _T_58 == 5'h19 ? _sdq_chunk_data_vec_T_10 : _T_53 == 5'h19 ? _sdq_chunk_data_vec_T_9 : _T_48 == 5'h19 ? _sdq_chunk_data_vec_T_8 : _T_43 == 5'h19 ? _sdq_chunk_data_vec_T_7 : _T_38 == 5'h19 ? _sdq_chunk_data_vec_T_6 : _T_33 == 5'h19 ? _sdq_chunk_data_vec_T_5 : _T_28 == 5'h19 ? _sdq_chunk_data_vec_T_4 : _T_23 == 5'h19 ? _sdq_chunk_data_vec_T_3 : _T_18 == 5'h19 ? _sdq_chunk_data_vec_T_2 : _T_13 == 5'h19 ? _sdq_chunk_data_vec_T_1 : _T_161 == 5'h19 ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
assign sdq_chunk_data_vec_26 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] == 5'h1A ? _sdq_chunk_data_vec_T_31 : _T_158 == 5'h1A ? _sdq_chunk_data_vec_T_30 : _T_153 == 5'h1A ? _sdq_chunk_data_vec_T_29 : _T_148 == 5'h1A ? _sdq_chunk_data_vec_T_28 : _T_143 == 5'h1A ? _sdq_chunk_data_vec_T_27 : _T_138 == 5'h1A ? _sdq_chunk_data_vec_T_26 : _T_133 == 5'h1A ? _sdq_chunk_data_vec_T_25 : _T_128 == 5'h1A ? _sdq_chunk_data_vec_T_24 : _T_123 == 5'h1A ? _sdq_chunk_data_vec_T_23 : _T_118 == 5'h1A ? _sdq_chunk_data_vec_T_22 : _T_113 == 5'h1A ? _sdq_chunk_data_vec_T_21 : _T_108 == 5'h1A ? _sdq_chunk_data_vec_T_20 : _T_103 == 5'h1A ? _sdq_chunk_data_vec_T_19 : _T_98 == 5'h1A ? _sdq_chunk_data_vec_T_18 : _T_93 == 5'h1A ? _sdq_chunk_data_vec_T_17 : _T_88 == 5'h1A ? _sdq_chunk_data_vec_T_16 : _T_83 == 5'h1A ? _sdq_chunk_data_vec_T_15 : _T_78 == 5'h1A ? _sdq_chunk_data_vec_T_14 : _T_73 == 5'h1A ? _sdq_chunk_data_vec_T_13 : _T_68 == 5'h1A ? _sdq_chunk_data_vec_T_12 : _T_63 == 5'h1A ? _sdq_chunk_data_vec_T_11 : _T_58 == 5'h1A ? _sdq_chunk_data_vec_T_10 : _T_53 == 5'h1A ? _sdq_chunk_data_vec_T_9 : _T_48 == 5'h1A ? _sdq_chunk_data_vec_T_8 : _T_43 == 5'h1A ? _sdq_chunk_data_vec_T_7 : _T_38 == 5'h1A ? _sdq_chunk_data_vec_T_6 : _T_33 == 5'h1A ? _sdq_chunk_data_vec_T_5 : _T_28 == 5'h1A ? _sdq_chunk_data_vec_T_4 : _T_23 == 5'h1A ? _sdq_chunk_data_vec_T_3 : _T_18 == 5'h1A ? _sdq_chunk_data_vec_T_2 : _T_13 == 5'h1A ? _sdq_chunk_data_vec_T_1 : _T_161 == 5'h1A ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
assign sdq_chunk_data_vec_27 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] == 5'h1B ? _sdq_chunk_data_vec_T_31 : _T_158 == 5'h1B ? _sdq_chunk_data_vec_T_30 : _T_153 == 5'h1B ? _sdq_chunk_data_vec_T_29 : _T_148 == 5'h1B ? _sdq_chunk_data_vec_T_28 : _T_143 == 5'h1B ? _sdq_chunk_data_vec_T_27 : _T_138 == 5'h1B ? _sdq_chunk_data_vec_T_26 : _T_133 == 5'h1B ? _sdq_chunk_data_vec_T_25 : _T_128 == 5'h1B ? _sdq_chunk_data_vec_T_24 : _T_123 == 5'h1B ? _sdq_chunk_data_vec_T_23 : _T_118 == 5'h1B ? _sdq_chunk_data_vec_T_22 : _T_113 == 5'h1B ? _sdq_chunk_data_vec_T_21 : _T_108 == 5'h1B ? _sdq_chunk_data_vec_T_20 : _T_103 == 5'h1B ? _sdq_chunk_data_vec_T_19 : _T_98 == 5'h1B ? _sdq_chunk_data_vec_T_18 : _T_93 == 5'h1B ? _sdq_chunk_data_vec_T_17 : _T_88 == 5'h1B ? _sdq_chunk_data_vec_T_16 : _T_83 == 5'h1B ? _sdq_chunk_data_vec_T_15 : _T_78 == 5'h1B ? _sdq_chunk_data_vec_T_14 : _T_73 == 5'h1B ? _sdq_chunk_data_vec_T_13 : _T_68 == 5'h1B ? _sdq_chunk_data_vec_T_12 : _T_63 == 5'h1B ? _sdq_chunk_data_vec_T_11 : _T_58 == 5'h1B ? _sdq_chunk_data_vec_T_10 : _T_53 == 5'h1B ? _sdq_chunk_data_vec_T_9 : _T_48 == 5'h1B ? _sdq_chunk_data_vec_T_8 : _T_43 == 5'h1B ? _sdq_chunk_data_vec_T_7 : _T_38 == 5'h1B ? _sdq_chunk_data_vec_T_6 : _T_33 == 5'h1B ? _sdq_chunk_data_vec_T_5 : _T_28 == 5'h1B ? _sdq_chunk_data_vec_T_4 : _T_23 == 5'h1B ? _sdq_chunk_data_vec_T_3 : _T_18 == 5'h1B ? _sdq_chunk_data_vec_T_2 : _T_13 == 5'h1B ? _sdq_chunk_data_vec_T_1 : _T_161 == 5'h1B ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
assign sdq_chunk_data_vec_28 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] == 5'h1C ? _sdq_chunk_data_vec_T_31 : _T_158 == 5'h1C ? _sdq_chunk_data_vec_T_30 : _T_153 == 5'h1C ? _sdq_chunk_data_vec_T_29 : _T_148 == 5'h1C ? _sdq_chunk_data_vec_T_28 : _T_143 == 5'h1C ? _sdq_chunk_data_vec_T_27 : _T_138 == 5'h1C ? _sdq_chunk_data_vec_T_26 : _T_133 == 5'h1C ? _sdq_chunk_data_vec_T_25 : _T_128 == 5'h1C ? _sdq_chunk_data_vec_T_24 : _T_123 == 5'h1C ? _sdq_chunk_data_vec_T_23 : _T_118 == 5'h1C ? _sdq_chunk_data_vec_T_22 : _T_113 == 5'h1C ? _sdq_chunk_data_vec_T_21 : _T_108 == 5'h1C ? _sdq_chunk_data_vec_T_20 : _T_103 == 5'h1C ? _sdq_chunk_data_vec_T_19 : _T_98 == 5'h1C ? _sdq_chunk_data_vec_T_18 : _T_93 == 5'h1C ? _sdq_chunk_data_vec_T_17 : _T_88 == 5'h1C ? _sdq_chunk_data_vec_T_16 : _T_83 == 5'h1C ? _sdq_chunk_data_vec_T_15 : _T_78 == 5'h1C ? _sdq_chunk_data_vec_T_14 : _T_73 == 5'h1C ? _sdq_chunk_data_vec_T_13 : _T_68 == 5'h1C ? _sdq_chunk_data_vec_T_12 : _T_63 == 5'h1C ? _sdq_chunk_data_vec_T_11 : _T_58 == 5'h1C ? _sdq_chunk_data_vec_T_10 : _T_53 == 5'h1C ? _sdq_chunk_data_vec_T_9 : _T_48 == 5'h1C ? _sdq_chunk_data_vec_T_8 : _T_43 == 5'h1C ? _sdq_chunk_data_vec_T_7 : _T_38 == 5'h1C ? _sdq_chunk_data_vec_T_6 : _T_33 == 5'h1C ? _sdq_chunk_data_vec_T_5 : _T_28 == 5'h1C ? _sdq_chunk_data_vec_T_4 : _T_23 == 5'h1C ? _sdq_chunk_data_vec_T_3 : _T_18 == 5'h1C ? _sdq_chunk_data_vec_T_2 : _T_13 == 5'h1C ? _sdq_chunk_data_vec_T_1 : _T_161 == 5'h1C ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
assign sdq_chunk_data_vec_29 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] == 5'h1D ? _sdq_chunk_data_vec_T_31 : _T_158 == 5'h1D ? _sdq_chunk_data_vec_T_30 : _T_153 == 5'h1D ? _sdq_chunk_data_vec_T_29 : _T_148 == 5'h1D ? _sdq_chunk_data_vec_T_28 : _T_143 == 5'h1D ? _sdq_chunk_data_vec_T_27 : _T_138 == 5'h1D ? _sdq_chunk_data_vec_T_26 : _T_133 == 5'h1D ? _sdq_chunk_data_vec_T_25 : _T_128 == 5'h1D ? _sdq_chunk_data_vec_T_24 : _T_123 == 5'h1D ? _sdq_chunk_data_vec_T_23 : _T_118 == 5'h1D ? _sdq_chunk_data_vec_T_22 : _T_113 == 5'h1D ? _sdq_chunk_data_vec_T_21 : _T_108 == 5'h1D ? _sdq_chunk_data_vec_T_20 : _T_103 == 5'h1D ? _sdq_chunk_data_vec_T_19 : _T_98 == 5'h1D ? _sdq_chunk_data_vec_T_18 : _T_93 == 5'h1D ? _sdq_chunk_data_vec_T_17 : _T_88 == 5'h1D ? _sdq_chunk_data_vec_T_16 : _T_83 == 5'h1D ? _sdq_chunk_data_vec_T_15 : _T_78 == 5'h1D ? _sdq_chunk_data_vec_T_14 : _T_73 == 5'h1D ? _sdq_chunk_data_vec_T_13 : _T_68 == 5'h1D ? _sdq_chunk_data_vec_T_12 : _T_63 == 5'h1D ? _sdq_chunk_data_vec_T_11 : _T_58 == 5'h1D ? _sdq_chunk_data_vec_T_10 : _T_53 == 5'h1D ? _sdq_chunk_data_vec_T_9 : _T_48 == 5'h1D ? _sdq_chunk_data_vec_T_8 : _T_43 == 5'h1D ? _sdq_chunk_data_vec_T_7 : _T_38 == 5'h1D ? _sdq_chunk_data_vec_T_6 : _T_33 == 5'h1D ? _sdq_chunk_data_vec_T_5 : _T_28 == 5'h1D ? _sdq_chunk_data_vec_T_4 : _T_23 == 5'h1D ? _sdq_chunk_data_vec_T_3 : _T_18 == 5'h1D ? _sdq_chunk_data_vec_T_2 : _T_13 == 5'h1D ? _sdq_chunk_data_vec_T_1 : _T_161 == 5'h1D ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
assign sdq_chunk_data_vec_30 = _store_data_queue_io_deq_bits_chunk_size_bytes[4:0] == 5'h1E ? _sdq_chunk_data_vec_T_31 : _T_158 == 5'h1E ? _sdq_chunk_data_vec_T_30 : _T_153 == 5'h1E ? _sdq_chunk_data_vec_T_29 : _T_148 == 5'h1E ? _sdq_chunk_data_vec_T_28 : _T_143 == 5'h1E ? _sdq_chunk_data_vec_T_27 : _T_138 == 5'h1E ? _sdq_chunk_data_vec_T_26 : _T_133 == 5'h1E ? _sdq_chunk_data_vec_T_25 : _T_128 == 5'h1E ? _sdq_chunk_data_vec_T_24 : _T_123 == 5'h1E ? _sdq_chunk_data_vec_T_23 : _T_118 == 5'h1E ? _sdq_chunk_data_vec_T_22 : _T_113 == 5'h1E ? _sdq_chunk_data_vec_T_21 : _T_108 == 5'h1E ? _sdq_chunk_data_vec_T_20 : _T_103 == 5'h1E ? _sdq_chunk_data_vec_T_19 : _T_98 == 5'h1E ? _sdq_chunk_data_vec_T_18 : _T_93 == 5'h1E ? _sdq_chunk_data_vec_T_17 : _T_88 == 5'h1E ? _sdq_chunk_data_vec_T_16 : _T_83 == 5'h1E ? _sdq_chunk_data_vec_T_15 : _T_78 == 5'h1E ? _sdq_chunk_data_vec_T_14 : _T_73 == 5'h1E ? _sdq_chunk_data_vec_T_13 : _T_68 == 5'h1E ? _sdq_chunk_data_vec_T_12 : _T_63 == 5'h1E ? _sdq_chunk_data_vec_T_11 : _T_58 == 5'h1E ? _sdq_chunk_data_vec_T_10 : _T_53 == 5'h1E ? _sdq_chunk_data_vec_T_9 : _T_48 == 5'h1E ? _sdq_chunk_data_vec_T_8 : _T_43 == 5'h1E ? _sdq_chunk_data_vec_T_7 : _T_38 == 5'h1E ? _sdq_chunk_data_vec_T_6 : _T_33 == 5'h1E ? _sdq_chunk_data_vec_T_5 : _T_28 == 5'h1E ? _sdq_chunk_data_vec_T_4 : _T_23 == 5'h1E ? _sdq_chunk_data_vec_T_3 : _T_18 == 5'h1E ? _sdq_chunk_data_vec_T_2 : _T_13 == 5'h1E ? _sdq_chunk_data_vec_T_1 : _T_161 == 5'h1E ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
assign sdq_chunk_data_vec_31 = (&(_store_data_queue_io_deq_bits_chunk_size_bytes[4:0])) ? _sdq_chunk_data_vec_T_31 : (&_T_158) ? _sdq_chunk_data_vec_T_30 : (&_T_153) ? _sdq_chunk_data_vec_T_29 : (&_T_148) ? _sdq_chunk_data_vec_T_28 : (&_T_143) ? _sdq_chunk_data_vec_T_27 : (&_T_138) ? _sdq_chunk_data_vec_T_26 : (&_T_133) ? _sdq_chunk_data_vec_T_25 : (&_T_128) ? _sdq_chunk_data_vec_T_24 : (&_T_123) ? _sdq_chunk_data_vec_T_23 : (&_T_118) ? _sdq_chunk_data_vec_T_22 : (&_T_113) ? _sdq_chunk_data_vec_T_21 : (&_T_108) ? _sdq_chunk_data_vec_T_20 : (&_T_103) ? _sdq_chunk_data_vec_T_19 : (&_T_98) ? _sdq_chunk_data_vec_T_18 : (&_T_93) ? _sdq_chunk_data_vec_T_17 : (&_T_88) ? _sdq_chunk_data_vec_T_16 : (&_T_83) ? _sdq_chunk_data_vec_T_15 : (&_T_78) ? _sdq_chunk_data_vec_T_14 : (&_T_73) ? _sdq_chunk_data_vec_T_13 : (&_T_68) ? _sdq_chunk_data_vec_T_12 : (&_T_63) ? _sdq_chunk_data_vec_T_11 : (&_T_58) ? _sdq_chunk_data_vec_T_10 : (&_T_53) ? _sdq_chunk_data_vec_T_9 : (&_T_48) ? _sdq_chunk_data_vec_T_8 : (&_T_43) ? _sdq_chunk_data_vec_T_7 : (&_T_38) ? _sdq_chunk_data_vec_T_6 : (&_T_33) ? _sdq_chunk_data_vec_T_5 : (&_T_28) ? _sdq_chunk_data_vec_T_4 : (&_T_23) ? _sdq_chunk_data_vec_T_3 : (&_T_18) ? _sdq_chunk_data_vec_T_2 : (&_T_13) ? _sdq_chunk_data_vec_T_1 : (&_T_161) ? _sdq_chunk_data_vec_T : 8'h0; // @[MemStreamer.scala:74:32, :79:35, :81:{39,45,52,69}]
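// Concatenate the 32 selected bytes (sdq_chunk_data_vec_0 in the least-significant position) into
// the 256-bit store data that drives io.memwrites_in.bits.data.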
wire [15:0] io_memwrites_in_bits_data_lo_lo_lo_lo = {sdq_chunk_data_vec_1, sdq_chunk_data_vec_0}; // @[MemStreamer.scala:79:35, :83:51]
wire [15:0] io_memwrites_in_bits_data_lo_lo_lo_hi = {sdq_chunk_data_vec_3, sdq_chunk_data_vec_2}; // @[MemStreamer.scala:79:35, :83:51]
wire [31:0] io_memwrites_in_bits_data_lo_lo_lo = {io_memwrites_in_bits_data_lo_lo_lo_hi, io_memwrites_in_bits_data_lo_lo_lo_lo}; // @[MemStreamer.scala:83:51]
wire [15:0] io_memwrites_in_bits_data_lo_lo_hi_lo = {sdq_chunk_data_vec_5, sdq_chunk_data_vec_4}; // @[MemStreamer.scala:79:35, :83:51]
wire [15:0] io_memwrites_in_bits_data_lo_lo_hi_hi = {sdq_chunk_data_vec_7, sdq_chunk_data_vec_6}; // @[MemStreamer.scala:79:35, :83:51]
wire [31:0] io_memwrites_in_bits_data_lo_lo_hi = {io_memwrites_in_bits_data_lo_lo_hi_hi, io_memwrites_in_bits_data_lo_lo_hi_lo}; // @[MemStreamer.scala:83:51]
wire [63:0] io_memwrites_in_bits_data_lo_lo = {io_memwrites_in_bits_data_lo_lo_hi, io_memwrites_in_bits_data_lo_lo_lo}; // @[MemStreamer.scala:83:51]
wire [15:0] io_memwrites_in_bits_data_lo_hi_lo_lo = {sdq_chunk_data_vec_9, sdq_chunk_data_vec_8}; // @[MemStreamer.scala:79:35, :83:51]
wire [15:0] io_memwrites_in_bits_data_lo_hi_lo_hi = {sdq_chunk_data_vec_11, sdq_chunk_data_vec_10}; // @[MemStreamer.scala:79:35, :83:51]
wire [31:0] io_memwrites_in_bits_data_lo_hi_lo = {io_memwrites_in_bits_data_lo_hi_lo_hi, io_memwrites_in_bits_data_lo_hi_lo_lo}; // @[MemStreamer.scala:83:51]
wire [15:0] io_memwrites_in_bits_data_lo_hi_hi_lo = {sdq_chunk_data_vec_13, sdq_chunk_data_vec_12}; // @[MemStreamer.scala:79:35, :83:51]
wire [15:0] io_memwrites_in_bits_data_lo_hi_hi_hi = {sdq_chunk_data_vec_15, sdq_chunk_data_vec_14}; // @[MemStreamer.scala:79:35, :83:51]
wire [31:0] io_memwrites_in_bits_data_lo_hi_hi = {io_memwrites_in_bits_data_lo_hi_hi_hi, io_memwrites_in_bits_data_lo_hi_hi_lo}; // @[MemStreamer.scala:83:51]
wire [63:0] io_memwrites_in_bits_data_lo_hi = {io_memwrites_in_bits_data_lo_hi_hi, io_memwrites_in_bits_data_lo_hi_lo}; // @[MemStreamer.scala:83:51]
wire [127:0] io_memwrites_in_bits_data_lo = {io_memwrites_in_bits_data_lo_hi, io_memwrites_in_bits_data_lo_lo}; // @[MemStreamer.scala:83:51]
wire [15:0] io_memwrites_in_bits_data_hi_lo_lo_lo = {sdq_chunk_data_vec_17, sdq_chunk_data_vec_16}; // @[MemStreamer.scala:79:35, :83:51]
wire [15:0] io_memwrites_in_bits_data_hi_lo_lo_hi = {sdq_chunk_data_vec_19, sdq_chunk_data_vec_18}; // @[MemStreamer.scala:79:35, :83:51]
wire [31:0] io_memwrites_in_bits_data_hi_lo_lo = {io_memwrites_in_bits_data_hi_lo_lo_hi, io_memwrites_in_bits_data_hi_lo_lo_lo}; // @[MemStreamer.scala:83:51]
wire [15:0] io_memwrites_in_bits_data_hi_lo_hi_lo = {sdq_chunk_data_vec_21, sdq_chunk_data_vec_20}; // @[MemStreamer.scala:79:35, :83:51]
wire [15:0] io_memwrites_in_bits_data_hi_lo_hi_hi = {sdq_chunk_data_vec_23, sdq_chunk_data_vec_22}; // @[MemStreamer.scala:79:35, :83:51]
wire [31:0] io_memwrites_in_bits_data_hi_lo_hi = {io_memwrites_in_bits_data_hi_lo_hi_hi, io_memwrites_in_bits_data_hi_lo_hi_lo}; // @[MemStreamer.scala:83:51]
wire [63:0] io_memwrites_in_bits_data_hi_lo = {io_memwrites_in_bits_data_hi_lo_hi, io_memwrites_in_bits_data_hi_lo_lo}; // @[MemStreamer.scala:83:51]
wire [15:0] io_memwrites_in_bits_data_hi_hi_lo_lo = {sdq_chunk_data_vec_25, sdq_chunk_data_vec_24}; // @[MemStreamer.scala:79:35, :83:51]
wire [15:0] io_memwrites_in_bits_data_hi_hi_lo_hi = {sdq_chunk_data_vec_27, sdq_chunk_data_vec_26}; // @[MemStreamer.scala:79:35, :83:51]
wire [31:0] io_memwrites_in_bits_data_hi_hi_lo = {io_memwrites_in_bits_data_hi_hi_lo_hi, io_memwrites_in_bits_data_hi_hi_lo_lo}; // @[MemStreamer.scala:83:51]
wire [15:0] io_memwrites_in_bits_data_hi_hi_hi_lo = {sdq_chunk_data_vec_29, sdq_chunk_data_vec_28}; // @[MemStreamer.scala:79:35, :83:51]
wire [15:0] io_memwrites_in_bits_data_hi_hi_hi_hi = {sdq_chunk_data_vec_31, sdq_chunk_data_vec_30}; // @[MemStreamer.scala:79:35, :83:51]
wire [31:0] io_memwrites_in_bits_data_hi_hi_hi = {io_memwrites_in_bits_data_hi_hi_hi_hi, io_memwrites_in_bits_data_hi_hi_hi_lo}; // @[MemStreamer.scala:83:51]
wire [63:0] io_memwrites_in_bits_data_hi_hi = {io_memwrites_in_bits_data_hi_hi_hi, io_memwrites_in_bits_data_hi_hi_lo}; // @[MemStreamer.scala:83:51]
wire [127:0] io_memwrites_in_bits_data_hi = {io_memwrites_in_bits_data_hi_hi, io_memwrites_in_bits_data_hi_lo}; // @[MemStreamer.scala:83:51]
assign _io_memwrites_in_bits_data_T = {io_memwrites_in_bits_data_hi, io_memwrites_in_bits_data_lo}; // @[MemStreamer.scala:83:51]
assign load_data_queue_io_enq_bits_chunk_data_io_memwrites_in_bits_data_0 = _io_memwrites_in_bits_data_T; // @[MemStreamer.scala:83:51]
reg [63:0] allargs_0_2; // @[Logger.scala:37:33]
wire [64:0] _loginfo_cycles_T_4 = {1'h0, allargs_0_2} + 65'h1; // @[Logger.scala:37:33, :38:38]
wire [63:0] _loginfo_cycles_T_5 = _loginfo_cycles_T_4[63:0]; // @[Logger.scala:38:38]
reg [63:0] allargs_0_3; // @[Logger.scala:37:33]
wire [64:0] _loginfo_cycles_T_6 = {1'h0, allargs_0_3} + 65'h1; // @[Logger.scala:37:33, :38:38]
wire [63:0] _loginfo_cycles_T_7 = _loginfo_cycles_T_6[63:0]; // @[Logger.scala:38:38]
wire [5:0] _store_data_queue_io_enq_bits_chunk_size_bytes_T_93; // @[AES256ECB.scala:87:60] |
Generate the Verilog code corresponding to the following Chisel files.
File Fragmenter.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressSet, BufferParams, IdRange, TransferSizes}
import freechips.rocketchip.util.{Repeater, OH1ToUInt, UIntToOH1}
import scala.math.min
import freechips.rocketchip.util.DataToAugmentedData
object EarlyAck {
sealed trait T
case object AllPuts extends T
case object PutFulls extends T
case object None extends T
}
// minSize: minimum size of transfers supported by all outward managers
// maxSize: maximum size of transfers supported after the Fragmenter is applied
// alwaysMin: fragment all requests down to minSize (else fragment to maximum supported by manager)
// earlyAck: should a multibeat Put be acknowledged on the first beat or the last beat
// holdFirstDeny: allow the Fragmenter to unsafely combine multibeat Gets by taking the first denied for the whole burst
// nameSuffix: appends a suffix to the module name
// Fragmenter modifies: PutFull, PutPartial, LogicalData, Get, Hint
// Fragmenter passes: ArithmeticData (truncated to minSize if alwaysMin)
// Fragmenter cannot modify acquire (could livelock); thus it is unsafe to put caches on both sides
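//
// A minimal usage sketch (illustrative, not part of this file): the companion object below is
// the usual way to splice the adapter into a diplomacy graph, e.g.
//   ram.node := TLFragmenter(minSize = beatBytes, maxSize = blockBytes) := master.node
// where `ram`, `master`, `beatBytes` and `blockBytes` are hypothetical stand-ins for the
// surrounding design; any request larger than what the addressed manager supports is then
// broken into fragments no larger than that manager's limit (or down to minSize when
// alwaysMin = true).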
class TLFragmenter(val minSize: Int, val maxSize: Int, val alwaysMin: Boolean = false, val earlyAck: EarlyAck.T = EarlyAck.None, val holdFirstDeny: Boolean = false, val nameSuffix: Option[String] = None)(implicit p: Parameters) extends LazyModule
{
require(isPow2 (maxSize), s"TLFragmenter expects pow2(maxSize), but got $maxSize")
require(isPow2 (minSize), s"TLFragmenter expects pow2(minSize), but got $minSize")
require(minSize <= maxSize, s"TLFragmenter expects min <= max, but got $minSize > $maxSize")
val fragmentBits = log2Ceil(maxSize / minSize)
val fullBits = if (earlyAck == EarlyAck.PutFulls) 1 else 0
val toggleBits = 1
val addedBits = fragmentBits + toggleBits + fullBits
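  // Quick arithmetic check (assuming, for illustration, minSize = 8, maxSize = 64 and
  // earlyAck = EarlyAck.PutFulls): fragmentBits = log2Ceil(64/8) = 3, toggleBits = 1 and
  // fullBits = 1, so addedBits = 5 and each original source id expands into 2^5 = 32 inner
  // source ids (see the `endSourceId << addedBits` term in the clientFn below).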
def expandTransfer(x: TransferSizes, op: String) = if (!x) x else {
// validate that we can apply the fragmenter correctly
require (x.max >= minSize, s"TLFragmenter (with parent $parent) max transfer size $op(${x.max}) must be >= min transfer size (${minSize})")
TransferSizes(x.min, maxSize)
}
private def noChangeRequired = minSize == maxSize
private def shrinkTransfer(x: TransferSizes) =
if (!alwaysMin) x
else if (x.min <= minSize) TransferSizes(x.min, min(minSize, x.max))
else TransferSizes.none
private def mapManager(m: TLSlaveParameters) = m.v1copy(
supportsArithmetic = shrinkTransfer(m.supportsArithmetic),
supportsLogical = shrinkTransfer(m.supportsLogical),
supportsGet = expandTransfer(m.supportsGet, "Get"),
supportsPutFull = expandTransfer(m.supportsPutFull, "PutFull"),
    supportsPutPartial = expandTransfer(m.supportsPutPartial, "PutPartial"),
supportsHint = expandTransfer(m.supportsHint, "Hint"))
val node = new TLAdapterNode(
// We require that all the responses are mutually FIFO
// Thus we need to compact all of the masters into one big master
clientFn = { c => (if (noChangeRequired) c else c.v2copy(
masters = Seq(TLMasterParameters.v2(
name = "TLFragmenter",
sourceId = IdRange(0, if (minSize == maxSize) c.endSourceId else (c.endSourceId << addedBits)),
requestFifo = true,
emits = TLMasterToSlaveTransferSizes(
acquireT = shrinkTransfer(c.masters.map(_.emits.acquireT) .reduce(_ mincover _)),
acquireB = shrinkTransfer(c.masters.map(_.emits.acquireB) .reduce(_ mincover _)),
arithmetic = shrinkTransfer(c.masters.map(_.emits.arithmetic).reduce(_ mincover _)),
logical = shrinkTransfer(c.masters.map(_.emits.logical) .reduce(_ mincover _)),
get = shrinkTransfer(c.masters.map(_.emits.get) .reduce(_ mincover _)),
putFull = shrinkTransfer(c.masters.map(_.emits.putFull) .reduce(_ mincover _)),
putPartial = shrinkTransfer(c.masters.map(_.emits.putPartial).reduce(_ mincover _)),
hint = shrinkTransfer(c.masters.map(_.emits.hint) .reduce(_ mincover _))
)
))
))},
managerFn = { m => if (noChangeRequired) m else m.v2copy(slaves = m.slaves.map(mapManager)) }
) {
override def circuitIdentity = noChangeRequired
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
override def desiredName = (Seq("TLFragmenter") ++ nameSuffix).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
if (noChangeRequired) {
out <> in
} else {
// All managers must share a common FIFO domain (responses might end up interleaved)
val manager = edgeOut.manager
val managers = manager.managers
val beatBytes = manager.beatBytes
val fifoId = managers(0).fifoId
require (fifoId.isDefined && managers.map(_.fifoId == fifoId).reduce(_ && _))
require (!manager.anySupportAcquireB || !edgeOut.client.anySupportProbe,
s"TLFragmenter (with parent $parent) can't fragment a caching client's requests into a cacheable region")
require (minSize >= beatBytes, s"TLFragmenter (with parent $parent) can't support fragmenting ($minSize) to sub-beat ($beatBytes) accesses")
// We can't support devices which are cached on both sides of us
require (!edgeOut.manager.anySupportAcquireB || !edgeIn.client.anySupportProbe)
// We can't support denied because we reassemble fragments
require (!edgeOut.manager.mayDenyGet || holdFirstDeny, s"TLFragmenter (with parent $parent) can't support denials without holdFirstDeny=true")
require (!edgeOut.manager.mayDenyPut || earlyAck == EarlyAck.None)
/* The Fragmenter is a bit tricky, because there are 5 sizes in play:
* max size -- the maximum transfer size possible
* orig size -- the original pre-fragmenter size
* frag size -- the modified post-fragmenter size
* min size -- the threshold below which frag=orig
       * beat size -- the amount transferred on any given beat
*
* The relationships are as follows:
* max >= orig >= frag
* max > min >= beat
* It IS possible that orig <= min (then frag=orig; ie: no fragmentation)
*
* The fragment# (sent via TL.source) is measured in multiples of min size.
* Meanwhile, to track the progress, counters measure in multiples of beat size.
*
* Here is an example of a bus with max=256, min=8, beat=4 and a device supporting 16.
*
* in.A out.A (frag#) out.D (frag#) in.D gen# ack#
* get64 get16 6 ackD16 6 ackD64 12 15
* ackD16 6 ackD64 14
* ackD16 6 ackD64 13
* ackD16 6 ackD64 12
* get16 4 ackD16 4 ackD64 8 11
* ackD16 4 ackD64 10
* ackD16 4 ackD64 9
* ackD16 4 ackD64 8
* get16 2 ackD16 2 ackD64 4 7
* ackD16 2 ackD64 6
* ackD16 2 ackD64 5
* ackD16 2 ackD64 4
* get16 0 ackD16 0 ackD64 0 3
* ackD16 0 ackD64 2
* ackD16 0 ackD64 1
* ackD16 0 ackD64 0
*
* get8 get8 0 ackD8 0 ackD8 0 1
* ackD8 0 ackD8 0
*
* get4 get4 0 ackD4 0 ackD4 0 0
* get1 get1 0 ackD1 0 ackD1 0 0
*
* put64 put16 6 15
* put64 put16 6 14
* put64 put16 6 13
* put64 put16 6 ack16 6 12 12
* put64 put16 4 11
* put64 put16 4 10
* put64 put16 4 9
* put64 put16 4 ack16 4 8 8
* put64 put16 2 7
* put64 put16 2 6
* put64 put16 2 5
* put64 put16 2 ack16 2 4 4
* put64 put16 0 3
* put64 put16 0 2
* put64 put16 0 1
* put64 put16 0 ack16 0 ack64 0 0
*
* put8 put8 0 1
* put8 put8 0 ack8 0 ack8 0 0
*
* put4 put4 0 ack4 0 ack4 0 0
* put1 put1 0 ack1 0 ack1 0 0
*/
val counterBits = log2Up(maxSize/beatBytes)
val maxDownSize = if (alwaysMin) minSize else min(manager.maxTransfer, maxSize)
// Consider the following waveform for two 4-beat bursts:
// ---A----A------------
// -------D-----DDD-DDDD
// Under TL rules, the second A can use the same source as the first A,
// because the source is released for reuse on the first response beat.
//
// However, if we fragment the requests, it looks like this:
// ---3210-3210---------
// -------3-----210-3210
      // ... now we've broken the rules because 210 are twice in flight.
//
// This phenomenon means we can have essentially 2*maxSize/minSize-1
// fragmented transactions in flight per original transaction source.
//
// To keep the source unique, we encode the beat counter in the low
// bits of the source. To solve the overlap, we use a toggle bit.
// Whatever toggle bit the D is reassembling, A will use the opposite.
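      // Worked example of the overlap (assuming maxSize = 64, minSize = 8): one original source
      // can have up to 2*64/8 - 1 = 15 fragments in flight across the tail of an old burst and
      // the head of a new one, more than the 3 fragment bits alone can keep unique; since A uses
      // the opposite toggle value from the burst D is still reassembling, the two bursts never
      // share an outgoing source id.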
// First, handle the return path
val acknum = RegInit(0.U(counterBits.W))
val dOrig = Reg(UInt())
val dToggle = RegInit(false.B)
val dFragnum = out.d.bits.source(fragmentBits-1, 0)
val dFirst = acknum === 0.U
val dLast = dFragnum === 0.U // only for AccessAck (!Data)
val dsizeOH = UIntToOH (out.d.bits.size, log2Ceil(maxDownSize)+1)
val dsizeOH1 = UIntToOH1(out.d.bits.size, log2Up(maxDownSize))
val dHasData = edgeOut.hasData(out.d.bits)
// calculate new acknum
val acknum_fragment = dFragnum << log2Ceil(minSize/beatBytes)
val acknum_size = dsizeOH1 >> log2Ceil(beatBytes)
assert (!out.d.valid || (acknum_fragment & acknum_size) === 0.U)
val dFirst_acknum = acknum_fragment | Mux(dHasData, acknum_size, 0.U)
val ack_decrement = Mux(dHasData, 1.U, dsizeOH >> log2Ceil(beatBytes))
// calculate the original size
val dFirst_size = OH1ToUInt((dFragnum << log2Ceil(minSize)) | dsizeOH1)
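      // Reconstruction check against the table above (min = 8, a get64 answered as four get16s):
      // for the first ackD16 of the burst, dFragnum = 6 and dsizeOH1 = 0b1111 (16 bytes), so
      // (6 << 3) | 0b1111 = 0b111111 and OH1ToUInt yields 6, i.e. the original 64-byte size.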
when (out.d.fire) {
acknum := Mux(dFirst, dFirst_acknum, acknum - ack_decrement)
when (dFirst) {
dOrig := dFirst_size
dToggle := out.d.bits.source(fragmentBits)
}
}
// Swallow up non-data ack fragments
val doEarlyAck = earlyAck match {
case EarlyAck.AllPuts => true.B
case EarlyAck.PutFulls => out.d.bits.source(fragmentBits+1)
case EarlyAck.None => false.B
}
val drop = !dHasData && !Mux(doEarlyAck, dFirst, dLast)
out.d.ready := in.d.ready || drop
in.d.valid := out.d.valid && !drop
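      // In the put64 example of the table above (earlyAck = None), the AccessAcks for fragments
      // 6, 4 and 2 carry no data and are not the last fragment, so `drop` swallows them; only the
      // fragment-0 ack reaches in.d, where it stands for the whole 64-byte put.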
in.d.bits := out.d.bits // pass most stuff unchanged
in.d.bits.source := out.d.bits.source >> addedBits
in.d.bits.size := Mux(dFirst, dFirst_size, dOrig)
if (edgeOut.manager.mayDenyPut) {
val r_denied = Reg(Bool())
val d_denied = (!dFirst && r_denied) || out.d.bits.denied
when (out.d.fire) { r_denied := d_denied }
in.d.bits.denied := d_denied
}
if (edgeOut.manager.mayDenyGet) {
// Take denied only from the first beat and hold that value
val d_denied = out.d.bits.denied holdUnless dFirst
when (dHasData) {
in.d.bits.denied := d_denied
in.d.bits.corrupt := d_denied || out.d.bits.corrupt
}
}
// What maximum transfer sizes do downstream devices support?
val maxArithmetics = managers.map(_.supportsArithmetic.max)
val maxLogicals = managers.map(_.supportsLogical.max)
val maxGets = managers.map(_.supportsGet.max)
val maxPutFulls = managers.map(_.supportsPutFull.max)
val maxPutPartials = managers.map(_.supportsPutPartial.max)
val maxHints = managers.map(m => if (m.supportsHint) maxDownSize else 0)
// We assume that the request is valid => size 0 is impossible
val lgMinSize = log2Ceil(minSize).U
val maxLgArithmetics = maxArithmetics.map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgLogicals = maxLogicals .map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgGets = maxGets .map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgPutFulls = maxPutFulls .map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgPutPartials = maxPutPartials.map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgHints = maxHints .map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
// Make the request repeatable
val repeater = Module(new Repeater(in.a.bits))
repeater.io.enq <> in.a
val in_a = repeater.io.deq
      // If this is in front of a single manager, these become constants
val find = manager.findFast(edgeIn.address(in_a.bits))
val maxLgArithmetic = Mux1H(find, maxLgArithmetics)
val maxLgLogical = Mux1H(find, maxLgLogicals)
val maxLgGet = Mux1H(find, maxLgGets)
val maxLgPutFull = Mux1H(find, maxLgPutFulls)
val maxLgPutPartial = Mux1H(find, maxLgPutPartials)
val maxLgHint = Mux1H(find, maxLgHints)
val limit = if (alwaysMin) lgMinSize else
MuxLookup(in_a.bits.opcode, lgMinSize)(Array(
TLMessages.PutFullData -> maxLgPutFull,
TLMessages.PutPartialData -> maxLgPutPartial,
TLMessages.ArithmeticData -> maxLgArithmetic,
TLMessages.LogicalData -> maxLgLogical,
TLMessages.Get -> maxLgGet,
TLMessages.Hint -> maxLgHint))
val aOrig = in_a.bits.size
val aFrag = Mux(aOrig > limit, limit, aOrig)
val aOrigOH1 = UIntToOH1(aOrig, log2Ceil(maxSize))
val aFragOH1 = UIntToOH1(aFrag, log2Up(maxDownSize))
val aHasData = edgeIn.hasData(in_a.bits)
val aMask = Mux(aHasData, 0.U, aFragOH1)
val gennum = RegInit(0.U(counterBits.W))
val aFirst = gennum === 0.U
val old_gennum1 = Mux(aFirst, aOrigOH1 >> log2Ceil(beatBytes), gennum - 1.U)
val new_gennum = ~(~old_gennum1 | (aMask >> log2Ceil(beatBytes))) // ~(~x|y) is width safe
val aFragnum = ~(~(old_gennum1 >> log2Ceil(minSize/beatBytes)) | (aFragOH1 >> log2Ceil(minSize)))
val aLast = aFragnum === 0.U
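      // Sanity check against the put64 row of the table (beatBytes = 4, minSize = 8, frag = 16):
      // on the first beat old_gennum1 = 63 >> 2 = 15 and aFragOH1 >> 3 = 1, so
      // aFragnum = ~(~(15 >> 1) | 1) = 6, matching the frag# 6 emitted with the first put16.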
val aToggle = !Mux(aFirst, dToggle, RegEnable(dToggle, aFirst))
val aFull = if (earlyAck == EarlyAck.PutFulls) Some(in_a.bits.opcode === TLMessages.PutFullData) else None
when (out.a.fire) { gennum := new_gennum }
repeater.io.repeat := !aHasData && aFragnum =/= 0.U
out.a <> in_a
out.a.bits.address := in_a.bits.address | ~(old_gennum1 << log2Ceil(beatBytes) | ~aOrigOH1 | aFragOH1 | (minSize-1).U)
out.a.bits.source := Cat(Seq(in_a.bits.source) ++ aFull ++ Seq(aToggle.asUInt, aFragnum))
out.a.bits.size := aFrag
// Optimize away some of the Repeater's registers
assert (!repeater.io.full || !aHasData)
out.a.bits.data := in.a.bits.data
val fullMask = ((BigInt(1) << beatBytes) - 1).U
assert (!repeater.io.full || in_a.bits.mask === fullMask)
out.a.bits.mask := Mux(repeater.io.full, fullMask, in.a.bits.mask)
out.a.bits.user.waiveAll :<= in.a.bits.user.subset(_.isData)
// Tie off unused channels
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLFragmenter
{
def apply(minSize: Int, maxSize: Int, alwaysMin: Boolean = false, earlyAck: EarlyAck.T = EarlyAck.None, holdFirstDeny: Boolean = false, nameSuffix: Option[String] = None)(implicit p: Parameters): TLNode =
{
if (minSize <= maxSize) {
val fragmenter = LazyModule(new TLFragmenter(minSize, maxSize, alwaysMin, earlyAck, holdFirstDeny, nameSuffix))
fragmenter.node
} else { TLEphemeralNode()(ValName("no_fragmenter")) }
}
def apply(wrapper: TLBusWrapper, nameSuffix: Option[String])(implicit p: Parameters): TLNode = apply(wrapper.beatBytes, wrapper.blockBytes, nameSuffix = nameSuffix)
def apply(wrapper: TLBusWrapper)(implicit p: Parameters): TLNode = apply(wrapper, None)
}
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class TLRAMFragmenter(ramBeatBytes: Int, maxSize: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val fuzz = LazyModule(new TLFuzzer(txns))
val model = LazyModule(new TLRAMModel("Fragmenter"))
val ram = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff), beatBytes = ramBeatBytes))
(ram.node
:= TLDelayer(0.1)
:= TLBuffer(BufferParams.flow)
:= TLDelayer(0.1)
:= TLFragmenter(ramBeatBytes, maxSize, earlyAck = EarlyAck.AllPuts)
:= TLDelayer(0.1)
:= TLBuffer(BufferParams.flow)
:= TLFragmenter(ramBeatBytes, maxSize/2)
:= TLDelayer(0.1)
:= TLBuffer(BufferParams.flow)
:= model.node
:= fuzz.node)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzz.module.io.finished
}
}
class TLRAMFragmenterTest(ramBeatBytes: Int, maxSize: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLRAMFragmenter(ramBeatBytes,maxSize,txns)).module)
io.finished := dut.io.finished
dut.io.start := io.start
}
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and any unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
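    // For example, when both endpoints of a diplomatic edge live inside this LazyModule (a node
    // of this module and a child, or two sibling children), their two Dangles share the same
    // `source`; such pairs are connected here and never surface as AutoBundle IO.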
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.diplomacy.{
AddressDecoder, AddressSet, BufferParams, DirectedBuffers, IdMap, IdMapEntry,
IdRange, RegionType, TransferSizes
}
import freechips.rocketchip.resources.{Resource, ResourceAddress, ResourcePermissions}
import freechips.rocketchip.util.{
AsyncQueueParams, BundleField, BundleFieldBase, BundleKeyBase,
CreditedDelay, groupByIntoSeq, RationalDirection, SimpleProduct
}
import scala.math.max
// These transfer sizes describe requests issued from masters on the A channel that will be responded to by slaves on the D channel
case class TLMasterToSlaveTransferSizes(
// Supports both Acquire+Release of the following two sizes:
acquireT: TransferSizes = TransferSizes.none,
acquireB: TransferSizes = TransferSizes.none,
arithmetic: TransferSizes = TransferSizes.none,
logical: TransferSizes = TransferSizes.none,
get: TransferSizes = TransferSizes.none,
putFull: TransferSizes = TransferSizes.none,
putPartial: TransferSizes = TransferSizes.none,
hint: TransferSizes = TransferSizes.none)
extends TLCommonTransferSizes {
def intersect(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes(
acquireT = acquireT .intersect(rhs.acquireT),
acquireB = acquireB .intersect(rhs.acquireB),
arithmetic = arithmetic.intersect(rhs.arithmetic),
logical = logical .intersect(rhs.logical),
get = get .intersect(rhs.get),
putFull = putFull .intersect(rhs.putFull),
putPartial = putPartial.intersect(rhs.putPartial),
hint = hint .intersect(rhs.hint))
def mincover(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes(
acquireT = acquireT .mincover(rhs.acquireT),
acquireB = acquireB .mincover(rhs.acquireB),
arithmetic = arithmetic.mincover(rhs.arithmetic),
logical = logical .mincover(rhs.logical),
get = get .mincover(rhs.get),
putFull = putFull .mincover(rhs.putFull),
putPartial = putPartial.mincover(rhs.putPartial),
hint = hint .mincover(rhs.hint))
// Reduce rendering to a simple yes/no per field
override def toString = {
def str(x: TransferSizes, flag: String) = if (x.none) "" else flag
def flags = Vector(
str(acquireT, "T"),
str(acquireB, "B"),
str(arithmetic, "A"),
str(logical, "L"),
str(get, "G"),
str(putFull, "F"),
str(putPartial, "P"),
str(hint, "H"))
flags.mkString
}
// Prints out the actual information in a user readable way
def infoString = {
s"""acquireT = ${acquireT}
|acquireB = ${acquireB}
|arithmetic = ${arithmetic}
|logical = ${logical}
|get = ${get}
|putFull = ${putFull}
|putPartial = ${putPartial}
|hint = ${hint}
|
|""".stripMargin
}
}
object TLMasterToSlaveTransferSizes {
def unknownEmits = TLMasterToSlaveTransferSizes(
acquireT = TransferSizes(1, 4096),
acquireB = TransferSizes(1, 4096),
arithmetic = TransferSizes(1, 4096),
logical = TransferSizes(1, 4096),
get = TransferSizes(1, 4096),
putFull = TransferSizes(1, 4096),
putPartial = TransferSizes(1, 4096),
hint = TransferSizes(1, 4096))
def unknownSupports = TLMasterToSlaveTransferSizes()
}
// These transfer sizes describe requests issued from slaves on the B channel that will be responded to by masters on the C channel
case class TLSlaveToMasterTransferSizes(
probe: TransferSizes = TransferSizes.none,
arithmetic: TransferSizes = TransferSizes.none,
logical: TransferSizes = TransferSizes.none,
get: TransferSizes = TransferSizes.none,
putFull: TransferSizes = TransferSizes.none,
putPartial: TransferSizes = TransferSizes.none,
hint: TransferSizes = TransferSizes.none
) extends TLCommonTransferSizes {
def intersect(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes(
probe = probe .intersect(rhs.probe),
arithmetic = arithmetic.intersect(rhs.arithmetic),
logical = logical .intersect(rhs.logical),
get = get .intersect(rhs.get),
putFull = putFull .intersect(rhs.putFull),
putPartial = putPartial.intersect(rhs.putPartial),
hint = hint .intersect(rhs.hint)
)
def mincover(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes(
probe = probe .mincover(rhs.probe),
arithmetic = arithmetic.mincover(rhs.arithmetic),
logical = logical .mincover(rhs.logical),
get = get .mincover(rhs.get),
putFull = putFull .mincover(rhs.putFull),
putPartial = putPartial.mincover(rhs.putPartial),
hint = hint .mincover(rhs.hint)
)
// Reduce rendering to a simple yes/no per field
override def toString = {
def str(x: TransferSizes, flag: String) = if (x.none) "" else flag
def flags = Vector(
str(probe, "P"),
str(arithmetic, "A"),
str(logical, "L"),
str(get, "G"),
str(putFull, "F"),
str(putPartial, "P"),
str(hint, "H"))
flags.mkString
}
// Prints out the actual information in a user readable way
def infoString = {
s"""probe = ${probe}
|arithmetic = ${arithmetic}
|logical = ${logical}
|get = ${get}
|putFull = ${putFull}
|putPartial = ${putPartial}
|hint = ${hint}
|
|""".stripMargin
}
}
object TLSlaveToMasterTransferSizes {
def unknownEmits = TLSlaveToMasterTransferSizes(
arithmetic = TransferSizes(1, 4096),
logical = TransferSizes(1, 4096),
get = TransferSizes(1, 4096),
putFull = TransferSizes(1, 4096),
putPartial = TransferSizes(1, 4096),
hint = TransferSizes(1, 4096),
probe = TransferSizes(1, 4096))
def unknownSupports = TLSlaveToMasterTransferSizes()
}
trait TLCommonTransferSizes {
def arithmetic: TransferSizes
def logical: TransferSizes
def get: TransferSizes
def putFull: TransferSizes
def putPartial: TransferSizes
def hint: TransferSizes
}
class TLSlaveParameters private(
val nodePath: Seq[BaseNode],
val resources: Seq[Resource],
setName: Option[String],
val address: Seq[AddressSet],
val regionType: RegionType.T,
val executable: Boolean,
val fifoId: Option[Int],
val supports: TLMasterToSlaveTransferSizes,
val emits: TLSlaveToMasterTransferSizes,
// By default, slaves are forbidden from issuing 'denied' responses (it prevents Fragmentation)
val alwaysGrantsT: Boolean, // typically only true for CacheCork'd read-write devices; dual: neverReleaseData
// If fifoId=Some, all accesses sent to the same fifoId are executed and ACK'd in FIFO order
// Note: you can only rely on this FIFO behaviour if your TLMasterParameters include requestFifo
val mayDenyGet: Boolean, // applies to: AccessAckData, GrantData
val mayDenyPut: Boolean) // applies to: AccessAck, Grant, HintAck
// ReleaseAck may NEVER be denied
extends SimpleProduct
{
def sortedAddress = address.sorted
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlaveParameters]
override def productPrefix = "TLSlaveParameters"
// We intentionally omit nodePath for equality testing / formatting
def productArity: Int = 11
def productElement(n: Int): Any = n match {
case 0 => name
case 1 => address
case 2 => resources
case 3 => regionType
case 4 => executable
case 5 => fifoId
case 6 => supports
case 7 => emits
case 8 => alwaysGrantsT
case 9 => mayDenyGet
case 10 => mayDenyPut
case _ => throw new IndexOutOfBoundsException(n.toString)
}
def supportsAcquireT: TransferSizes = supports.acquireT
def supportsAcquireB: TransferSizes = supports.acquireB
def supportsArithmetic: TransferSizes = supports.arithmetic
def supportsLogical: TransferSizes = supports.logical
def supportsGet: TransferSizes = supports.get
def supportsPutFull: TransferSizes = supports.putFull
def supportsPutPartial: TransferSizes = supports.putPartial
def supportsHint: TransferSizes = supports.hint
require (!address.isEmpty, "Address cannot be empty")
address.foreach { a => require (a.finite, "Address must be finite") }
address.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") }
require (supportsPutFull.contains(supportsPutPartial), s"PutFull($supportsPutFull) < PutPartial($supportsPutPartial)")
require (supportsPutFull.contains(supportsArithmetic), s"PutFull($supportsPutFull) < Arithmetic($supportsArithmetic)")
require (supportsPutFull.contains(supportsLogical), s"PutFull($supportsPutFull) < Logical($supportsLogical)")
require (supportsGet.contains(supportsArithmetic), s"Get($supportsGet) < Arithmetic($supportsArithmetic)")
require (supportsGet.contains(supportsLogical), s"Get($supportsGet) < Logical($supportsLogical)")
require (supportsAcquireB.contains(supportsAcquireT), s"AcquireB($supportsAcquireB) < AcquireT($supportsAcquireT)")
require (!alwaysGrantsT || supportsAcquireT, s"Must supportAcquireT if promising to always grantT")
// Make sure that the regionType agrees with the capabilities
require (!supportsAcquireB || regionType >= RegionType.UNCACHED) // acquire -> uncached, tracked, cached
require (regionType <= RegionType.UNCACHED || supportsAcquireB) // tracked, cached -> acquire
require (regionType != RegionType.UNCACHED || supportsGet) // uncached -> supportsGet
val name = setName.orElse(nodePath.lastOption.map(_.lazyModule.name)).getOrElse("disconnected")
val maxTransfer = List( // Largest supported transfer of all types
supportsAcquireT.max,
supportsAcquireB.max,
supportsArithmetic.max,
supportsLogical.max,
supportsGet.max,
supportsPutFull.max,
supportsPutPartial.max).max
val maxAddress = address.map(_.max).max
val minAlignment = address.map(_.alignment).min
// The device had better not support a transfer larger than its alignment
require (minAlignment >= maxTransfer, s"Bad $address: minAlignment ($minAlignment) must be >= maxTransfer ($maxTransfer)")
def toResource: ResourceAddress = {
ResourceAddress(address, ResourcePermissions(
r = supportsAcquireB || supportsGet,
w = supportsAcquireT || supportsPutFull,
x = executable,
c = supportsAcquireB,
a = supportsArithmetic && supportsLogical))
}
def findTreeViolation() = nodePath.find {
case _: MixedAdapterNode[_, _, _, _, _, _, _, _] => false
case _: SinkNode[_, _, _, _, _] => false
case node => node.inputs.size != 1
}
def isTree = findTreeViolation() == None
def infoString = {
s"""Slave Name = ${name}
|Slave Address = ${address}
|supports = ${supports.infoString}
|
|""".stripMargin
}
def v1copy(
address: Seq[AddressSet] = address,
resources: Seq[Resource] = resources,
regionType: RegionType.T = regionType,
executable: Boolean = executable,
nodePath: Seq[BaseNode] = nodePath,
supportsAcquireT: TransferSizes = supports.acquireT,
supportsAcquireB: TransferSizes = supports.acquireB,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint,
mayDenyGet: Boolean = mayDenyGet,
mayDenyPut: Boolean = mayDenyPut,
alwaysGrantsT: Boolean = alwaysGrantsT,
fifoId: Option[Int] = fifoId) =
{
new TLSlaveParameters(
setName = setName,
address = address,
resources = resources,
regionType = regionType,
executable = executable,
nodePath = nodePath,
supports = TLMasterToSlaveTransferSizes(
acquireT = supportsAcquireT,
acquireB = supportsAcquireB,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = emits,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut,
alwaysGrantsT = alwaysGrantsT,
fifoId = fifoId)
}
def v2copy(
nodePath: Seq[BaseNode] = nodePath,
resources: Seq[Resource] = resources,
name: Option[String] = setName,
address: Seq[AddressSet] = address,
regionType: RegionType.T = regionType,
executable: Boolean = executable,
fifoId: Option[Int] = fifoId,
supports: TLMasterToSlaveTransferSizes = supports,
emits: TLSlaveToMasterTransferSizes = emits,
alwaysGrantsT: Boolean = alwaysGrantsT,
mayDenyGet: Boolean = mayDenyGet,
mayDenyPut: Boolean = mayDenyPut) =
{
new TLSlaveParameters(
nodePath = nodePath,
resources = resources,
setName = name,
address = address,
regionType = regionType,
executable = executable,
fifoId = fifoId,
supports = supports,
emits = emits,
alwaysGrantsT = alwaysGrantsT,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut)
}
@deprecated("Use v1copy instead of copy","")
def copy(
address: Seq[AddressSet] = address,
resources: Seq[Resource] = resources,
regionType: RegionType.T = regionType,
executable: Boolean = executable,
nodePath: Seq[BaseNode] = nodePath,
supportsAcquireT: TransferSizes = supports.acquireT,
supportsAcquireB: TransferSizes = supports.acquireB,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint,
mayDenyGet: Boolean = mayDenyGet,
mayDenyPut: Boolean = mayDenyPut,
alwaysGrantsT: Boolean = alwaysGrantsT,
fifoId: Option[Int] = fifoId) =
{
v1copy(
address = address,
resources = resources,
regionType = regionType,
executable = executable,
nodePath = nodePath,
supportsAcquireT = supportsAcquireT,
supportsAcquireB = supportsAcquireB,
supportsArithmetic = supportsArithmetic,
supportsLogical = supportsLogical,
supportsGet = supportsGet,
supportsPutFull = supportsPutFull,
supportsPutPartial = supportsPutPartial,
supportsHint = supportsHint,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut,
alwaysGrantsT = alwaysGrantsT,
fifoId = fifoId)
}
}
object TLSlaveParameters {
def v1(
address: Seq[AddressSet],
resources: Seq[Resource] = Seq(),
regionType: RegionType.T = RegionType.GET_EFFECTS,
executable: Boolean = false,
nodePath: Seq[BaseNode] = Seq(),
supportsAcquireT: TransferSizes = TransferSizes.none,
supportsAcquireB: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none,
mayDenyGet: Boolean = false,
mayDenyPut: Boolean = false,
alwaysGrantsT: Boolean = false,
fifoId: Option[Int] = None) =
{
new TLSlaveParameters(
setName = None,
address = address,
resources = resources,
regionType = regionType,
executable = executable,
nodePath = nodePath,
supports = TLMasterToSlaveTransferSizes(
acquireT = supportsAcquireT,
acquireB = supportsAcquireB,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = TLSlaveToMasterTransferSizes.unknownEmits,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut,
alwaysGrantsT = alwaysGrantsT,
fifoId = fifoId)
}
def v2(
address: Seq[AddressSet],
nodePath: Seq[BaseNode] = Seq(),
resources: Seq[Resource] = Seq(),
name: Option[String] = None,
regionType: RegionType.T = RegionType.GET_EFFECTS,
executable: Boolean = false,
fifoId: Option[Int] = None,
supports: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownSupports,
emits: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownEmits,
alwaysGrantsT: Boolean = false,
mayDenyGet: Boolean = false,
mayDenyPut: Boolean = false) =
{
new TLSlaveParameters(
nodePath = nodePath,
resources = resources,
setName = name,
address = address,
regionType = regionType,
executable = executable,
fifoId = fifoId,
supports = supports,
emits = emits,
alwaysGrantsT = alwaysGrantsT,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut)
}
}
object TLManagerParameters {
@deprecated("Use TLSlaveParameters.v1 instead of TLManagerParameters","")
def apply(
address: Seq[AddressSet],
resources: Seq[Resource] = Seq(),
regionType: RegionType.T = RegionType.GET_EFFECTS,
executable: Boolean = false,
nodePath: Seq[BaseNode] = Seq(),
supportsAcquireT: TransferSizes = TransferSizes.none,
supportsAcquireB: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none,
mayDenyGet: Boolean = false,
mayDenyPut: Boolean = false,
alwaysGrantsT: Boolean = false,
fifoId: Option[Int] = None) =
TLSlaveParameters.v1(
address,
resources,
regionType,
executable,
nodePath,
supportsAcquireT,
supportsAcquireB,
supportsArithmetic,
supportsLogical,
supportsGet,
supportsPutFull,
supportsPutPartial,
supportsHint,
mayDenyGet,
mayDenyPut,
alwaysGrantsT,
fifoId,
)
}
case class TLChannelBeatBytes(a: Option[Int], b: Option[Int], c: Option[Int], d: Option[Int])
{
def members = Seq(a, b, c, d)
members.collect { case Some(beatBytes) =>
require (isPow2(beatBytes), "Data channel width must be a power of 2")
}
}
object TLChannelBeatBytes{
def apply(beatBytes: Int): TLChannelBeatBytes = TLChannelBeatBytes(
Some(beatBytes),
Some(beatBytes),
Some(beatBytes),
Some(beatBytes))
def apply(): TLChannelBeatBytes = TLChannelBeatBytes(
None,
None,
None,
None)
}
class TLSlavePortParameters private(
val slaves: Seq[TLSlaveParameters],
val channelBytes: TLChannelBeatBytes,
val endSinkId: Int,
val minLatency: Int,
val responseFields: Seq[BundleFieldBase],
val requestKeys: Seq[BundleKeyBase]) extends SimpleProduct
{
def sortedSlaves = slaves.sortBy(_.sortedAddress.head)
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlavePortParameters]
override def productPrefix = "TLSlavePortParameters"
def productArity: Int = 6
def productElement(n: Int): Any = n match {
case 0 => slaves
case 1 => channelBytes
case 2 => endSinkId
case 3 => minLatency
case 4 => responseFields
case 5 => requestKeys
case _ => throw new IndexOutOfBoundsException(n.toString)
}
require (!slaves.isEmpty, "Slave ports must have slaves")
require (endSinkId >= 0, "Sink ids cannot be negative")
require (minLatency >= 0, "Minimum required latency cannot be negative")
// Using this API implies you cannot handle mixed-width busses
def beatBytes = {
channelBytes.members.foreach { width =>
require (width.isDefined && width == channelBytes.a)
}
channelBytes.a.get
}
// TODO this should be deprecated
def managers = slaves
def requireFifo(policy: TLFIFOFixer.Policy = TLFIFOFixer.allFIFO) = {
val relevant = slaves.filter(m => policy(m))
relevant.foreach { m =>
require(m.fifoId == relevant.head.fifoId, s"${m.name} had fifoId ${m.fifoId}, which was not homogeneous (${slaves.map(s => (s.name, s.fifoId))}) ")
}
}
// Bounds on required sizes
def maxAddress = slaves.map(_.maxAddress).max
def maxTransfer = slaves.map(_.maxTransfer).max
def mayDenyGet = slaves.exists(_.mayDenyGet)
def mayDenyPut = slaves.exists(_.mayDenyPut)
// Diplomatically determined operation sizes emitted by all outward Slaves
// as opposed to emits* which generate circuitry to check which specific addresses
val allEmitClaims = slaves.map(_.emits).reduce( _ intersect _)
  // Operations emitted by at least one outward Slave
// as opposed to emits* which generate circuitry to check which specific addresses
val anyEmitClaims = slaves.map(_.emits).reduce(_ mincover _)
// Diplomatically determined operation sizes supported by all outward Slaves
// as opposed to supports* which generate circuitry to check which specific addresses
val allSupportClaims = slaves.map(_.supports).reduce( _ intersect _)
val allSupportAcquireT = allSupportClaims.acquireT
val allSupportAcquireB = allSupportClaims.acquireB
val allSupportArithmetic = allSupportClaims.arithmetic
val allSupportLogical = allSupportClaims.logical
val allSupportGet = allSupportClaims.get
val allSupportPutFull = allSupportClaims.putFull
val allSupportPutPartial = allSupportClaims.putPartial
val allSupportHint = allSupportClaims.hint
  // Operations supported by at least one outward Slave
// as opposed to supports* which generate circuitry to check which specific addresses
val anySupportClaims = slaves.map(_.supports).reduce(_ mincover _)
val anySupportAcquireT = !anySupportClaims.acquireT.none
val anySupportAcquireB = !anySupportClaims.acquireB.none
val anySupportArithmetic = !anySupportClaims.arithmetic.none
val anySupportLogical = !anySupportClaims.logical.none
val anySupportGet = !anySupportClaims.get.none
val anySupportPutFull = !anySupportClaims.putFull.none
val anySupportPutPartial = !anySupportClaims.putPartial.none
val anySupportHint = !anySupportClaims.hint.none
// Supporting Acquire means being routable for GrantAck
require ((endSinkId == 0) == !anySupportAcquireB)
// These return Option[TLSlaveParameters] for your convenience
def find(address: BigInt) = slaves.find(_.address.exists(_.contains(address)))
// The safe version will check the entire address
def findSafe(address: UInt) = VecInit(sortedSlaves.map(_.address.map(_.contains(address)).reduce(_ || _)))
// The fast version assumes the address is valid (you probably want fastProperty instead of this function)
def findFast(address: UInt) = {
val routingMask = AddressDecoder(slaves.map(_.address))
VecInit(sortedSlaves.map(_.address.map(_.widen(~routingMask)).distinct.map(_.contains(address)).reduce(_ || _)))
}
// Compute the simplest AddressSets that decide a key
def fastPropertyGroup[K](p: TLSlaveParameters => K): Seq[(K, Seq[AddressSet])] = {
val groups = groupByIntoSeq(sortedSlaves.map(m => (p(m), m.address)))( _._1).map { case (k, vs) =>
k -> vs.flatMap(_._2)
}
val reductionMask = AddressDecoder(groups.map(_._2))
groups.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~reductionMask)).distinct) }
}
// Select a property
def fastProperty[K, D <: Data](address: UInt, p: TLSlaveParameters => K, d: K => D): D =
Mux1H(fastPropertyGroup(p).map { case (v, a) => (a.map(_.contains(address)).reduce(_||_), d(v)) })
// Note: returns the actual fifoId + 1 or 0 if None
def findFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.map(_+1).getOrElse(0), (i:Int) => i.U)
def hasFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.isDefined, (b:Boolean) => b.B)
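  // For example, a slave with fifoId = Some(0) makes findFifoIdFast return 1.U, while a slave
  // with fifoId = None returns 0.U, so a result of 0 unambiguously means "no FIFO domain".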
// Does this Port manage this ID/address?
def containsSafe(address: UInt) = findSafe(address).reduce(_ || _)
private def addressHelper(
// setting safe to false indicates that all addresses are expected to be legal, which might reduce circuit complexity
safe: Boolean,
// member filters out the sizes being checked based on the opcode being emitted or supported
member: TLSlaveParameters => TransferSizes,
address: UInt,
lgSize: UInt,
// range provides a limit on the sizes that are expected to be evaluated, which might reduce circuit complexity
range: Option[TransferSizes]): Bool = {
// trim reduces circuit complexity by intersecting checked sizes with the range argument
def trim(x: TransferSizes) = range.map(_.intersect(x)).getOrElse(x)
// groupBy returns an unordered map, convert back to Seq and sort the result for determinism
    // groupByIntoSeq deterministically groups the slaves by their trimmed membership sizes:
    //  - member selects the kind of transfer being checked (the opcode you are filtering on)
    //  - trim intersects each slave's sizes with the range argument, so only the sizes we care about are considered
    //  - slaves are therefore filtered both by whether they support the opcode and by which sizes they support
    // Instead of returning the list of slaves, supportCases is a map from transfer size to the address sets
    // that emit or support that size; a single size may map to multiple address ranges.
    // Safety is a trade-off between checking all possible addresses and only the addresses known to have supported sizes:
    //  - the safe version is a more expensive circuit, but always gives the right answer, even for an illegal address
    //  - the fast version is a cheaper circuit that presumes address legality and may answer wrongly for an illegal address
val supportCases = groupByIntoSeq(slaves)(m => trim(member(m))).map { case (k: TransferSizes, vs: Seq[TLSlaveParameters]) =>
k -> vs.flatMap(_.address)
}
// safe produces a circuit that compares against all possible addresses,
// whereas fast presumes that the address is legal but uses an efficient address decoder
val mask = if (safe) ~BigInt(0) else AddressDecoder(supportCases.map(_._2))
    // Simplified creates the most concise possible representation of each case's address sets based on the mask.
val simplified = supportCases.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~mask)).distinct) }
simplified.map { case (s, a) =>
      // For each case: either this case covers the entire requested range, or the dynamic size must lie within s.
      // We or-reduce across all cases, checking whether any case matches both the dynamic size and the dynamic address on the wire.
((Some(s) == range).B || s.containsLg(lgSize)) &&
a.map(_.contains(address)).reduce(_||_)
}.foldLeft(false.B)(_||_)
}
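  // A purely illustrative sketch of what addressHelper computes (slave names, sizes, and addresses are hypothetical):
  //   slaveA: supports.get = TransferSizes(1, 64), address = Seq(AddressSet(0x0000, 0xfff))
  //   slaveB: supports.get = TransferSizes(1, 64), address = Seq(AddressSet(0x1000, 0xfff))
  //   slaveC: supports.get = TransferSizes.none,   address = Seq(AddressSet(0x2000, 0xfff))
  // With member = _.supports.get and no range, supportCases would be:
  //   TransferSizes(1, 64) -> Seq(AddressSet(0x0000, 0xfff), AddressSet(0x1000, 0xfff))
  //   TransferSizes.none   -> Seq(AddressSet(0x2000, 0xfff))
  // and the returned Bool is true only when lgSize fits a case whose (possibly widened) address sets contain the address.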
def supportsAcquireTSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireT, address, lgSize, range)
def supportsAcquireBSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireB, address, lgSize, range)
def supportsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.arithmetic, address, lgSize, range)
def supportsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.logical, address, lgSize, range)
def supportsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.get, address, lgSize, range)
def supportsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putFull, address, lgSize, range)
def supportsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putPartial, address, lgSize, range)
def supportsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.hint, address, lgSize, range)
def supportsAcquireTFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireT, address, lgSize, range)
def supportsAcquireBFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireB, address, lgSize, range)
def supportsArithmeticFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.arithmetic, address, lgSize, range)
def supportsLogicalFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.logical, address, lgSize, range)
def supportsGetFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.get, address, lgSize, range)
def supportsPutFullFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.putFull, address, lgSize, range)
def supportsPutPartialFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.putPartial, address, lgSize, range)
def supportsHintFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.hint, address, lgSize, range)
def emitsProbeSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.probe, address, lgSize, range)
def emitsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.arithmetic, address, lgSize, range)
def emitsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.logical, address, lgSize, range)
def emitsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.get, address, lgSize, range)
def emitsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putFull, address, lgSize, range)
def emitsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putPartial, address, lgSize, range)
def emitsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.hint, address, lgSize, range)
def findTreeViolation() = slaves.flatMap(_.findTreeViolation()).headOption
def isTree = !slaves.exists(!_.isTree)
def infoString = "Slave Port Beatbytes = " + beatBytes + "\n" + "Slave Port MinLatency = " + minLatency + "\n\n" + slaves.map(_.infoString).mkString
def v1copy(
managers: Seq[TLSlaveParameters] = slaves,
beatBytes: Int = -1,
endSinkId: Int = endSinkId,
minLatency: Int = minLatency,
responseFields: Seq[BundleFieldBase] = responseFields,
requestKeys: Seq[BundleKeyBase] = requestKeys) =
{
new TLSlavePortParameters(
slaves = managers,
channelBytes = if (beatBytes != -1) TLChannelBeatBytes(beatBytes) else channelBytes,
endSinkId = endSinkId,
minLatency = minLatency,
responseFields = responseFields,
requestKeys = requestKeys)
}
def v2copy(
slaves: Seq[TLSlaveParameters] = slaves,
channelBytes: TLChannelBeatBytes = channelBytes,
endSinkId: Int = endSinkId,
minLatency: Int = minLatency,
responseFields: Seq[BundleFieldBase] = responseFields,
requestKeys: Seq[BundleKeyBase] = requestKeys) =
{
new TLSlavePortParameters(
slaves = slaves,
channelBytes = channelBytes,
endSinkId = endSinkId,
minLatency = minLatency,
responseFields = responseFields,
requestKeys = requestKeys)
}
@deprecated("Use v1copy instead of copy","")
def copy(
managers: Seq[TLSlaveParameters] = slaves,
beatBytes: Int = -1,
endSinkId: Int = endSinkId,
minLatency: Int = minLatency,
responseFields: Seq[BundleFieldBase] = responseFields,
requestKeys: Seq[BundleKeyBase] = requestKeys) =
{
v1copy(
managers,
beatBytes,
endSinkId,
minLatency,
responseFields,
requestKeys)
}
}
object TLSlavePortParameters {
def v1(
managers: Seq[TLSlaveParameters],
beatBytes: Int,
endSinkId: Int = 0,
minLatency: Int = 0,
responseFields: Seq[BundleFieldBase] = Nil,
requestKeys: Seq[BundleKeyBase] = Nil) =
{
new TLSlavePortParameters(
slaves = managers,
channelBytes = TLChannelBeatBytes(beatBytes),
endSinkId = endSinkId,
minLatency = minLatency,
responseFields = responseFields,
requestKeys = requestKeys)
}
}
object TLManagerPortParameters {
@deprecated("Use TLSlavePortParameters.v1 instead of TLManagerPortParameters","")
def apply(
managers: Seq[TLSlaveParameters],
beatBytes: Int,
endSinkId: Int = 0,
minLatency: Int = 0,
responseFields: Seq[BundleFieldBase] = Nil,
requestKeys: Seq[BundleKeyBase] = Nil) =
{
TLSlavePortParameters.v1(
managers,
beatBytes,
endSinkId,
minLatency,
responseFields,
requestKeys)
}
}
class TLMasterParameters private(
val nodePath: Seq[BaseNode],
val resources: Seq[Resource],
val name: String,
val visibility: Seq[AddressSet],
val unusedRegionTypes: Set[RegionType.T],
val executesOnly: Boolean,
val requestFifo: Boolean, // only a request, not a requirement. applies to A, not C.
val supports: TLSlaveToMasterTransferSizes,
val emits: TLMasterToSlaveTransferSizes,
val neverReleasesData: Boolean,
val sourceId: IdRange) extends SimpleProduct
{
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterParameters]
override def productPrefix = "TLMasterParameters"
// We intentionally omit nodePath for equality testing / formatting
def productArity: Int = 10
def productElement(n: Int): Any = n match {
case 0 => name
case 1 => sourceId
case 2 => resources
case 3 => visibility
case 4 => unusedRegionTypes
case 5 => executesOnly
case 6 => requestFifo
case 7 => supports
case 8 => emits
case 9 => neverReleasesData
case _ => throw new IndexOutOfBoundsException(n.toString)
}
require (!sourceId.isEmpty)
require (!visibility.isEmpty)
require (supports.putFull.contains(supports.putPartial))
// We only support these operations if we support Probe (ie: we're a cache)
require (supports.probe.contains(supports.arithmetic))
require (supports.probe.contains(supports.logical))
require (supports.probe.contains(supports.get))
require (supports.probe.contains(supports.putFull))
require (supports.probe.contains(supports.putPartial))
require (supports.probe.contains(supports.hint))
visibility.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") }
val maxTransfer = List(
supports.probe.max,
supports.arithmetic.max,
supports.logical.max,
supports.get.max,
supports.putFull.max,
supports.putPartial.max).max
def infoString = {
s"""Master Name = ${name}
|visibility = ${visibility}
|emits = ${emits.infoString}
|sourceId = ${sourceId}
|
|""".stripMargin
}
def v1copy(
name: String = name,
sourceId: IdRange = sourceId,
nodePath: Seq[BaseNode] = nodePath,
requestFifo: Boolean = requestFifo,
visibility: Seq[AddressSet] = visibility,
supportsProbe: TransferSizes = supports.probe,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = this.resources,
name = name,
visibility = visibility,
unusedRegionTypes = this.unusedRegionTypes,
executesOnly = this.executesOnly,
requestFifo = requestFifo,
supports = TLSlaveToMasterTransferSizes(
probe = supportsProbe,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = this.emits,
neverReleasesData = this.neverReleasesData,
sourceId = sourceId)
}
def v2copy(
nodePath: Seq[BaseNode] = nodePath,
resources: Seq[Resource] = resources,
name: String = name,
visibility: Seq[AddressSet] = visibility,
unusedRegionTypes: Set[RegionType.T] = unusedRegionTypes,
executesOnly: Boolean = executesOnly,
requestFifo: Boolean = requestFifo,
supports: TLSlaveToMasterTransferSizes = supports,
emits: TLMasterToSlaveTransferSizes = emits,
neverReleasesData: Boolean = neverReleasesData,
sourceId: IdRange = sourceId) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = resources,
name = name,
visibility = visibility,
unusedRegionTypes = unusedRegionTypes,
executesOnly = executesOnly,
requestFifo = requestFifo,
supports = supports,
emits = emits,
neverReleasesData = neverReleasesData,
sourceId = sourceId)
}
@deprecated("Use v1copy instead of copy","")
def copy(
name: String = name,
sourceId: IdRange = sourceId,
nodePath: Seq[BaseNode] = nodePath,
requestFifo: Boolean = requestFifo,
visibility: Seq[AddressSet] = visibility,
supportsProbe: TransferSizes = supports.probe,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint) =
{
v1copy(
name = name,
sourceId = sourceId,
nodePath = nodePath,
requestFifo = requestFifo,
visibility = visibility,
supportsProbe = supportsProbe,
supportsArithmetic = supportsArithmetic,
supportsLogical = supportsLogical,
supportsGet = supportsGet,
supportsPutFull = supportsPutFull,
supportsPutPartial = supportsPutPartial,
supportsHint = supportsHint)
}
}
object TLMasterParameters {
def v1(
name: String,
sourceId: IdRange = IdRange(0,1),
nodePath: Seq[BaseNode] = Seq(),
requestFifo: Boolean = false,
visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)),
supportsProbe: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = Nil,
name = name,
visibility = visibility,
unusedRegionTypes = Set(),
executesOnly = false,
requestFifo = requestFifo,
supports = TLSlaveToMasterTransferSizes(
probe = supportsProbe,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = TLMasterToSlaveTransferSizes.unknownEmits,
neverReleasesData = false,
sourceId = sourceId)
}
def v2(
nodePath: Seq[BaseNode] = Seq(),
resources: Seq[Resource] = Nil,
name: String,
visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)),
unusedRegionTypes: Set[RegionType.T] = Set(),
executesOnly: Boolean = false,
requestFifo: Boolean = false,
supports: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownSupports,
emits: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownEmits,
neverReleasesData: Boolean = false,
sourceId: IdRange = IdRange(0,1)) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = resources,
name = name,
visibility = visibility,
unusedRegionTypes = unusedRegionTypes,
executesOnly = executesOnly,
requestFifo = requestFifo,
supports = supports,
emits = emits,
neverReleasesData = neverReleasesData,
sourceId = sourceId)
}
}
object TLClientParameters {
@deprecated("Use TLMasterParameters.v1 instead of TLClientParameters","")
def apply(
name: String,
sourceId: IdRange = IdRange(0,1),
nodePath: Seq[BaseNode] = Seq(),
requestFifo: Boolean = false,
visibility: Seq[AddressSet] = Seq(AddressSet.everything),
supportsProbe: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none) =
{
TLMasterParameters.v1(
name = name,
sourceId = sourceId,
nodePath = nodePath,
requestFifo = requestFifo,
visibility = visibility,
supportsProbe = supportsProbe,
supportsArithmetic = supportsArithmetic,
supportsLogical = supportsLogical,
supportsGet = supportsGet,
supportsPutFull = supportsPutFull,
supportsPutPartial = supportsPutPartial,
supportsHint = supportsHint)
}
}
class TLMasterPortParameters private(
val masters: Seq[TLMasterParameters],
val channelBytes: TLChannelBeatBytes,
val minLatency: Int,
val echoFields: Seq[BundleFieldBase],
val requestFields: Seq[BundleFieldBase],
val responseKeys: Seq[BundleKeyBase]) extends SimpleProduct
{
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterPortParameters]
override def productPrefix = "TLMasterPortParameters"
def productArity: Int = 6
def productElement(n: Int): Any = n match {
case 0 => masters
case 1 => channelBytes
case 2 => minLatency
case 3 => echoFields
case 4 => requestFields
case 5 => responseKeys
case _ => throw new IndexOutOfBoundsException(n.toString)
}
require (!masters.isEmpty)
require (minLatency >= 0)
def clients = masters
// Require disjoint ranges for Ids
IdRange.overlaps(masters.map(_.sourceId)).foreach { case (x, y) =>
require (!x.overlaps(y), s"TLClientParameters.sourceId ${x} overlaps ${y}")
}
// Bounds on required sizes
def endSourceId = masters.map(_.sourceId.end).max
def maxTransfer = masters.map(_.maxTransfer).max
// The unused sources < endSourceId
def unusedSources: Seq[Int] = {
val usedSources = masters.map(_.sourceId).sortBy(_.start)
((Seq(0) ++ usedSources.map(_.end)) zip usedSources.map(_.start)) flatMap { case (end, start) =>
end until start
}
}
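  // A small worked example (values are hypothetical): with masters owning sourceId ranges [0,2) and [4,6),
  // the zipped (end, start) pairs are (0,0) and (2,4), so unusedSources = (0 until 0) ++ (2 until 4) = Seq(2, 3):
  // ids 2 and 3 below endSourceId (6) are owned by no master.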
// Diplomatically determined operation sizes emitted by all inward Masters
// as opposed to emits* which generate circuitry to check which specific addresses
val allEmitClaims = masters.map(_.emits).reduce( _ intersect _)
  // Diplomatically determined operation sizes emitted by at least one inward Master
// as opposed to emits* which generate circuitry to check which specific addresses
val anyEmitClaims = masters.map(_.emits).reduce(_ mincover _)
// Diplomatically determined operation sizes supported by all inward Masters
// as opposed to supports* which generate circuitry to check which specific addresses
val allSupportProbe = masters.map(_.supports.probe) .reduce(_ intersect _)
val allSupportArithmetic = masters.map(_.supports.arithmetic).reduce(_ intersect _)
val allSupportLogical = masters.map(_.supports.logical) .reduce(_ intersect _)
val allSupportGet = masters.map(_.supports.get) .reduce(_ intersect _)
val allSupportPutFull = masters.map(_.supports.putFull) .reduce(_ intersect _)
val allSupportPutPartial = masters.map(_.supports.putPartial).reduce(_ intersect _)
val allSupportHint = masters.map(_.supports.hint) .reduce(_ intersect _)
// Diplomatically determined operation sizes supported by at least one master
// as opposed to supports* which generate circuitry to check which specific addresses
val anySupportProbe = masters.map(!_.supports.probe.none) .reduce(_ || _)
val anySupportArithmetic = masters.map(!_.supports.arithmetic.none).reduce(_ || _)
val anySupportLogical = masters.map(!_.supports.logical.none) .reduce(_ || _)
val anySupportGet = masters.map(!_.supports.get.none) .reduce(_ || _)
val anySupportPutFull = masters.map(!_.supports.putFull.none) .reduce(_ || _)
val anySupportPutPartial = masters.map(!_.supports.putPartial.none).reduce(_ || _)
val anySupportHint = masters.map(!_.supports.hint.none) .reduce(_ || _)
// These return Option[TLMasterParameters] for your convenience
def find(id: Int) = masters.find(_.sourceId.contains(id))
// Synthesizable lookup methods
def find(id: UInt) = VecInit(masters.map(_.sourceId.contains(id)))
def contains(id: UInt) = find(id).reduce(_ || _)
def requestFifo(id: UInt) = Mux1H(find(id), masters.map(c => c.requestFifo.B))
// Available during RTL runtime, checks to see if (id, size) is supported by the master's (client's) diplomatic parameters
private def sourceIdHelper(member: TLMasterParameters => TransferSizes)(id: UInt, lgSize: UInt) = {
val allSame = masters.map(member(_) == member(masters(0))).reduce(_ && _)
// this if statement is a coarse generalization of the groupBy in the sourceIdHelper2 version;
// the case where there is only one group.
if (allSame) member(masters(0)).containsLg(lgSize) else {
      // Find the master associated with the id and return whether that particular master is able to receive a transaction of lgSize
Mux1H(find(id), masters.map(member(_).containsLg(lgSize)))
}
}
// Check for support of a given operation at a specific id
val supportsProbe = sourceIdHelper(_.supports.probe) _
val supportsArithmetic = sourceIdHelper(_.supports.arithmetic) _
val supportsLogical = sourceIdHelper(_.supports.logical) _
val supportsGet = sourceIdHelper(_.supports.get) _
val supportsPutFull = sourceIdHelper(_.supports.putFull) _
val supportsPutPartial = sourceIdHelper(_.supports.putPartial) _
val supportsHint = sourceIdHelper(_.supports.hint) _
// TODO: Merge sourceIdHelper2 with sourceIdHelper
private def sourceIdHelper2(
member: TLMasterParameters => TransferSizes,
sourceId: UInt,
lgSize: UInt): Bool = {
// Because sourceIds are uniquely owned by each master, we use them to group the
// cases that have to be checked.
val emitCases = groupByIntoSeq(masters)(m => member(m)).map { case (k, vs) =>
k -> vs.map(_.sourceId)
}
emitCases.map { case (s, a) =>
(s.containsLg(lgSize)) &&
a.map(_.contains(sourceId)).reduce(_||_)
}.foldLeft(false.B)(_||_)
}
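  // Illustrative only: with masters m0 (sourceId [0,4)) and m1 (sourceId [4,8)) that both declare
  // emits.get = TransferSizes(1, 8), emitCases for _.emits.get collapses to the single case
  // TransferSizes(1, 8) -> Seq([0,4), [4,8)), so the generated circuit checks the size once and
  // or-reduces the sourceId membership tests.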
// Check for emit of a given operation at a specific id
def emitsAcquireT (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireT, sourceId, lgSize)
def emitsAcquireB (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireB, sourceId, lgSize)
def emitsArithmetic(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.arithmetic, sourceId, lgSize)
def emitsLogical (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.logical, sourceId, lgSize)
def emitsGet (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.get, sourceId, lgSize)
def emitsPutFull (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putFull, sourceId, lgSize)
def emitsPutPartial(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putPartial, sourceId, lgSize)
def emitsHint (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.hint, sourceId, lgSize)
def infoString = masters.map(_.infoString).mkString
def v1copy(
clients: Seq[TLMasterParameters] = masters,
minLatency: Int = minLatency,
echoFields: Seq[BundleFieldBase] = echoFields,
requestFields: Seq[BundleFieldBase] = requestFields,
responseKeys: Seq[BundleKeyBase] = responseKeys) =
{
new TLMasterPortParameters(
masters = clients,
channelBytes = channelBytes,
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
def v2copy(
masters: Seq[TLMasterParameters] = masters,
channelBytes: TLChannelBeatBytes = channelBytes,
minLatency: Int = minLatency,
echoFields: Seq[BundleFieldBase] = echoFields,
requestFields: Seq[BundleFieldBase] = requestFields,
responseKeys: Seq[BundleKeyBase] = responseKeys) =
{
new TLMasterPortParameters(
masters = masters,
channelBytes = channelBytes,
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
@deprecated("Use v1copy instead of copy","")
def copy(
clients: Seq[TLMasterParameters] = masters,
minLatency: Int = minLatency,
echoFields: Seq[BundleFieldBase] = echoFields,
requestFields: Seq[BundleFieldBase] = requestFields,
responseKeys: Seq[BundleKeyBase] = responseKeys) =
{
v1copy(
clients,
minLatency,
echoFields,
requestFields,
responseKeys)
}
}
object TLClientPortParameters {
@deprecated("Use TLMasterPortParameters.v1 instead of TLClientPortParameters","")
def apply(
clients: Seq[TLMasterParameters],
minLatency: Int = 0,
echoFields: Seq[BundleFieldBase] = Nil,
requestFields: Seq[BundleFieldBase] = Nil,
responseKeys: Seq[BundleKeyBase] = Nil) =
{
TLMasterPortParameters.v1(
clients,
minLatency,
echoFields,
requestFields,
responseKeys)
}
}
object TLMasterPortParameters {
def v1(
clients: Seq[TLMasterParameters],
minLatency: Int = 0,
echoFields: Seq[BundleFieldBase] = Nil,
requestFields: Seq[BundleFieldBase] = Nil,
responseKeys: Seq[BundleKeyBase] = Nil) =
{
new TLMasterPortParameters(
masters = clients,
channelBytes = TLChannelBeatBytes(),
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
def v2(
masters: Seq[TLMasterParameters],
channelBytes: TLChannelBeatBytes = TLChannelBeatBytes(),
minLatency: Int = 0,
echoFields: Seq[BundleFieldBase] = Nil,
requestFields: Seq[BundleFieldBase] = Nil,
responseKeys: Seq[BundleKeyBase] = Nil) =
{
new TLMasterPortParameters(
masters = masters,
channelBytes = channelBytes,
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
}
case class TLBundleParameters(
addressBits: Int,
dataBits: Int,
sourceBits: Int,
sinkBits: Int,
sizeBits: Int,
echoFields: Seq[BundleFieldBase],
requestFields: Seq[BundleFieldBase],
responseFields: Seq[BundleFieldBase],
hasBCE: Boolean)
{
// Chisel has issues with 0-width wires
require (addressBits >= 1)
require (dataBits >= 8)
require (sourceBits >= 1)
require (sinkBits >= 1)
require (sizeBits >= 1)
require (isPow2(dataBits))
echoFields.foreach { f => require (f.key.isControl, s"${f} is not a legal echo field") }
val addrLoBits = log2Up(dataBits/8)
// Used to uniquify bus IP names
def shortName = s"a${addressBits}d${dataBits}s${sourceBits}k${sinkBits}z${sizeBits}" + (if (hasBCE) "c" else "u")
def union(x: TLBundleParameters) =
TLBundleParameters(
max(addressBits, x.addressBits),
max(dataBits, x.dataBits),
max(sourceBits, x.sourceBits),
max(sinkBits, x.sinkBits),
max(sizeBits, x.sizeBits),
echoFields = BundleField.union(echoFields ++ x.echoFields),
requestFields = BundleField.union(requestFields ++ x.requestFields),
responseFields = BundleField.union(responseFields ++ x.responseFields),
hasBCE || x.hasBCE)
}
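// Sketch of union semantics (hypothetical operands): unioning a 32-bit-address/64-bit-data bundle with a
// 21-bit-address/64-bit-data bundle keeps the wider of each field (addressBits = 32, dataBits = 64, ...),
// merges the BundleFields, and ors hasBCE, so the merged bundle can carry traffic for either endpoint.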
object TLBundleParameters
{
val emptyBundleParams = TLBundleParameters(
addressBits = 1,
dataBits = 8,
sourceBits = 1,
sinkBits = 1,
sizeBits = 1,
echoFields = Nil,
requestFields = Nil,
responseFields = Nil,
hasBCE = false)
def union(x: Seq[TLBundleParameters]) = x.foldLeft(emptyBundleParams)((x,y) => x.union(y))
def apply(master: TLMasterPortParameters, slave: TLSlavePortParameters) =
new TLBundleParameters(
addressBits = log2Up(slave.maxAddress + 1),
dataBits = slave.beatBytes * 8,
sourceBits = log2Up(master.endSourceId),
sinkBits = log2Up(slave.endSinkId),
sizeBits = log2Up(log2Ceil(max(master.maxTransfer, slave.maxTransfer))+1),
echoFields = master.echoFields,
requestFields = BundleField.accept(master.requestFields, slave.requestKeys),
responseFields = BundleField.accept(slave.responseFields, master.responseKeys),
hasBCE = master.anySupportProbe && slave.anySupportAcquireB)
}
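// A rough illustration of how TLBundleParameters.apply derives the widths above (numbers are hypothetical):
//   slave.maxAddress = 0x3fffff -> addressBits = log2Up(0x400000) = 22
//   slave.beatBytes = 8         -> dataBits    = 64
//   master.endSourceId = 10     -> sourceBits  = log2Up(10) = 4
//   maxTransfer = 64 bytes      -> sizeBits    = log2Up(log2Ceil(64) + 1) = 3
// hasBCE is true only when some master supports Probe and some slave supports AcquireB.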
case class TLEdgeParameters(
master: TLMasterPortParameters,
slave: TLSlavePortParameters,
params: Parameters,
sourceInfo: SourceInfo) extends FormatEdge
{
// legacy names:
def manager = slave
def client = master
val maxTransfer = max(master.maxTransfer, slave.maxTransfer)
val maxLgSize = log2Ceil(maxTransfer)
// Sanity check the link...
require (maxTransfer >= slave.beatBytes, s"Link's max transfer (${maxTransfer}) < ${slave.slaves.map(_.name)}'s beatBytes (${slave.beatBytes})")
def diplomaticClaimsMasterToSlave = master.anyEmitClaims.intersect(slave.anySupportClaims)
val bundle = TLBundleParameters(master, slave)
def formatEdge = master.infoString + "\n" + slave.infoString
}
case class TLCreditedDelay(
a: CreditedDelay,
b: CreditedDelay,
c: CreditedDelay,
d: CreditedDelay,
e: CreditedDelay)
{
def + (that: TLCreditedDelay): TLCreditedDelay = TLCreditedDelay(
a = a + that.a,
b = b + that.b,
c = c + that.c,
d = d + that.d,
e = e + that.e)
override def toString = s"(${a}, ${b}, ${c}, ${d}, ${e})"
}
object TLCreditedDelay {
def apply(delay: CreditedDelay): TLCreditedDelay = apply(delay, delay.flip, delay, delay.flip, delay)
}
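// For illustration: TLCreditedDelay(delay) applies `delay` as-is on the master-to-slave channels (a, c, e)
// and `delay.flip` on the slave-to-master channels (b, d), matching the direction each TileLink channel travels.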
case class TLCreditedManagerPortParameters(delay: TLCreditedDelay, base: TLSlavePortParameters) {def infoString = base.infoString}
case class TLCreditedClientPortParameters(delay: TLCreditedDelay, base: TLMasterPortParameters) {def infoString = base.infoString}
case class TLCreditedEdgeParameters(client: TLCreditedClientPortParameters, manager: TLCreditedManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge
{
val delay = client.delay + manager.delay
val bundle = TLBundleParameters(client.base, manager.base)
def formatEdge = client.infoString + "\n" + manager.infoString
}
case class TLAsyncManagerPortParameters(async: AsyncQueueParams, base: TLSlavePortParameters) {def infoString = base.infoString}
case class TLAsyncClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString}
case class TLAsyncBundleParameters(async: AsyncQueueParams, base: TLBundleParameters)
case class TLAsyncEdgeParameters(client: TLAsyncClientPortParameters, manager: TLAsyncManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge
{
val bundle = TLAsyncBundleParameters(manager.async, TLBundleParameters(client.base, manager.base))
def formatEdge = client.infoString + "\n" + manager.infoString
}
case class TLRationalManagerPortParameters(direction: RationalDirection, base: TLSlavePortParameters) {def infoString = base.infoString}
case class TLRationalClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString}
case class TLRationalEdgeParameters(client: TLRationalClientPortParameters, manager: TLRationalManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge
{
val bundle = TLBundleParameters(client.base, manager.base)
def formatEdge = client.infoString + "\n" + manager.infoString
}
// To be unified, devices must agree on all of these terms
case class ManagerUnificationKey(
resources: Seq[Resource],
regionType: RegionType.T,
executable: Boolean,
supportsAcquireT: TransferSizes,
supportsAcquireB: TransferSizes,
supportsArithmetic: TransferSizes,
supportsLogical: TransferSizes,
supportsGet: TransferSizes,
supportsPutFull: TransferSizes,
supportsPutPartial: TransferSizes,
supportsHint: TransferSizes)
object ManagerUnificationKey
{
def apply(x: TLSlaveParameters): ManagerUnificationKey = ManagerUnificationKey(
resources = x.resources,
regionType = x.regionType,
executable = x.executable,
supportsAcquireT = x.supportsAcquireT,
supportsAcquireB = x.supportsAcquireB,
supportsArithmetic = x.supportsArithmetic,
supportsLogical = x.supportsLogical,
supportsGet = x.supportsGet,
supportsPutFull = x.supportsPutFull,
supportsPutPartial = x.supportsPutPartial,
supportsHint = x.supportsHint)
}
object ManagerUnification
{
def apply(slaves: Seq[TLSlaveParameters]): List[TLSlaveParameters] = {
slaves.groupBy(ManagerUnificationKey.apply).values.map { seq =>
val agree = seq.forall(_.fifoId == seq.head.fifoId)
seq(0).v1copy(
address = AddressSet.unify(seq.flatMap(_.address)),
fifoId = if (agree) seq(0).fifoId else None)
}.toList
}
}
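// An illustrative sketch of ManagerUnification (hypothetical slaves): two TLSlaveParameters that differ only
// in address, say AddressSet(0x0, 0xff) and AddressSet(0x100, 0xff), share a ManagerUnificationKey and are
// folded into a single entry whose address is AddressSet.unify of both; if their fifoIds disagree, the unified
// entry's fifoId becomes None.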
case class TLBufferParams(
a: BufferParams = BufferParams.none,
b: BufferParams = BufferParams.none,
c: BufferParams = BufferParams.none,
d: BufferParams = BufferParams.none,
e: BufferParams = BufferParams.none
) extends DirectedBuffers[TLBufferParams] {
def copyIn(x: BufferParams) = this.copy(b = x, d = x)
def copyOut(x: BufferParams) = this.copy(a = x, c = x, e = x)
def copyInOut(x: BufferParams) = this.copyIn(x).copyOut(x)
}
/** Pretty printing of TL source id maps */
class TLSourceIdMap(tl: TLMasterPortParameters) extends IdMap[TLSourceIdMapEntry] {
private val tlDigits = String.valueOf(tl.endSourceId-1).length()
protected val fmt = s"\t[%${tlDigits}d, %${tlDigits}d) %s%s%s"
private val sorted = tl.masters.sortBy(_.sourceId)
val mapping: Seq[TLSourceIdMapEntry] = sorted.map { case c =>
TLSourceIdMapEntry(c.sourceId, c.name, c.supports.probe, c.requestFifo)
}
}
case class TLSourceIdMapEntry(tlId: IdRange, name: String, isCache: Boolean, requestFifo: Boolean)
extends IdMapEntry
{
val from = tlId
val to = tlId
val maxTransactionsInFlight = Some(tlId.size)
}
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
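// For illustration (hypothetical values): HalfEdge(1, 0) < HalfEdge(1, 2) < HalfEdge(2, 0), since compare is
// lexicographic on (serial, index) via the tuple Ordering.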
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
 * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]];
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
 *   flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesIn`, if false it corresponds to
 *   `danglesOut`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
 * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package; all nodes are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
 *   Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
 *   Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
 *   the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
 *   Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
 *   It should extend [[chisel3.Data]], which represents the real hardware.
* @tparam DO
 *   Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
 *   describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
 *   Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
 *   Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
 *   interface. It should extend [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
 *   - line `─`: the source is processed by a function and the result is passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
  /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
    * connections which need to be resolved in some way to determine how many actual edges they correspond to. We also
    * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
    * edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
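  // A small worked example (hypothetical counts): if the resolved per-binding edge counts are Seq(1, 2, 1), then
  // scanLeft(0)(_ + _) gives Seq(0, 1, 3, 4) and init.zip(tail) gives the ranges Seq((0,1), (1,3), (3,4)),
  // i.e. each binding operator owns a contiguous slice of this node's edges.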
  /** Sequence of outward ports (each connects to the inward side of another node).
*
* This should be called after all star bindings are resolved.
*
    * Each element is: `j` Port index of this binding in the Node's [[iPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  /** Sequence of inward ports (each connects from the outward side of another node).
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need to access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
    // TODO: Defaulting unconnected forwarded diplomatic signals to DontCare for compatibility;
    // in the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
    // TODO: Defaulting unconnected forwarded diplomatic signals to DontCare for compatibility;
    // in the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessors to the result of negotiation, to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
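// Illustrative usage sketch (hypothetical adapter-style node, not part of this trait):
// inside a LazyModuleImp, the negotiated ports are typically consumed as
//   node.in.zip(node.out).foreach { case ((i, edgeIn), (o, edgeOut)) => o <> i }
// which is only legal once instantiation of the parent module has begun.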
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/** Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
| module TLFragmenter_TileResetSetter( // @[Fragmenter.scala:92:9]
input clock, // @[Fragmenter.scala:92:9]
input reset, // @[Fragmenter.scala:92:9]
output auto_anon_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [9:0] auto_anon_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [20:0] auto_anon_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_anon_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_anon_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [9:0] auto_anon_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_anon_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [13:0] auto_anon_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [20:0] auto_anon_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_anon_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_anon_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_anon_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [13:0] auto_anon_out_d_bits_source // @[LazyModuleImp.scala:107:25]
);
wire _repeater_io_full; // @[Fragmenter.scala:274:30]
wire [2:0] _repeater_io_deq_bits_opcode; // @[Fragmenter.scala:274:30]
wire [2:0] _repeater_io_deq_bits_size; // @[Fragmenter.scala:274:30]
wire [9:0] _repeater_io_deq_bits_source; // @[Fragmenter.scala:274:30]
wire [20:0] _repeater_io_deq_bits_address; // @[Fragmenter.scala:274:30]
wire [7:0] _repeater_io_deq_bits_mask; // @[Fragmenter.scala:274:30]
wire auto_anon_in_a_valid_0 = auto_anon_in_a_valid; // @[Fragmenter.scala:92:9]
wire [2:0] auto_anon_in_a_bits_opcode_0 = auto_anon_in_a_bits_opcode; // @[Fragmenter.scala:92:9]
wire [2:0] auto_anon_in_a_bits_param_0 = auto_anon_in_a_bits_param; // @[Fragmenter.scala:92:9]
wire [2:0] auto_anon_in_a_bits_size_0 = auto_anon_in_a_bits_size; // @[Fragmenter.scala:92:9]
wire [9:0] auto_anon_in_a_bits_source_0 = auto_anon_in_a_bits_source; // @[Fragmenter.scala:92:9]
wire [20:0] auto_anon_in_a_bits_address_0 = auto_anon_in_a_bits_address; // @[Fragmenter.scala:92:9]
wire [7:0] auto_anon_in_a_bits_mask_0 = auto_anon_in_a_bits_mask; // @[Fragmenter.scala:92:9]
wire [63:0] auto_anon_in_a_bits_data_0 = auto_anon_in_a_bits_data; // @[Fragmenter.scala:92:9]
wire auto_anon_in_a_bits_corrupt_0 = auto_anon_in_a_bits_corrupt; // @[Fragmenter.scala:92:9]
wire auto_anon_in_d_ready_0 = auto_anon_in_d_ready; // @[Fragmenter.scala:92:9]
wire auto_anon_out_a_ready_0 = auto_anon_out_a_ready; // @[Fragmenter.scala:92:9]
wire auto_anon_out_d_valid_0 = auto_anon_out_d_valid; // @[Fragmenter.scala:92:9]
wire [2:0] auto_anon_out_d_bits_opcode_0 = auto_anon_out_d_bits_opcode; // @[Fragmenter.scala:92:9]
wire [1:0] auto_anon_out_d_bits_size_0 = auto_anon_out_d_bits_size; // @[Fragmenter.scala:92:9]
wire [13:0] auto_anon_out_d_bits_source_0 = auto_anon_out_d_bits_source; // @[Fragmenter.scala:92:9]
wire [63:0] auto_anon_in_d_bits_data = 64'h0; // @[Fragmenter.scala:92:9]
wire [63:0] auto_anon_out_d_bits_data = 64'h0; // @[Fragmenter.scala:92:9]
wire [63:0] anonIn_d_bits_data = 64'h0; // @[MixedNode.scala:551:17]
wire [63:0] anonOut_d_bits_data = 64'h0; // @[MixedNode.scala:542:17]
wire [1:0] auto_anon_in_d_bits_param = 2'h0; // @[Fragmenter.scala:92:9]
wire [1:0] auto_anon_out_d_bits_param = 2'h0; // @[Fragmenter.scala:92:9]
wire [1:0] anonIn_d_bits_param = 2'h0; // @[MixedNode.scala:551:17]
wire [1:0] anonOut_d_bits_param = 2'h0; // @[MixedNode.scala:542:17]
wire auto_anon_in_d_bits_sink = 1'h0; // @[Fragmenter.scala:92:9]
wire auto_anon_in_d_bits_denied = 1'h0; // @[Fragmenter.scala:92:9]
wire auto_anon_in_d_bits_corrupt = 1'h0; // @[Fragmenter.scala:92:9]
wire auto_anon_out_d_bits_sink = 1'h0; // @[Fragmenter.scala:92:9]
wire auto_anon_out_d_bits_denied = 1'h0; // @[Fragmenter.scala:92:9]
wire auto_anon_out_d_bits_corrupt = 1'h0; // @[Fragmenter.scala:92:9]
wire anonIn_d_bits_sink = 1'h0; // @[MixedNode.scala:551:17]
wire anonIn_d_bits_denied = 1'h0; // @[MixedNode.scala:551:17]
wire anonIn_d_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17]
wire anonOut_d_bits_sink = 1'h0; // @[MixedNode.scala:542:17]
wire anonOut_d_bits_denied = 1'h0; // @[MixedNode.scala:542:17]
wire anonOut_d_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17]
wire acknum_size = 1'h0; // @[Fragmenter.scala:213:36]
wire _dFirst_acknum_T = 1'h0; // @[Fragmenter.scala:215:50]
wire _new_gennum_T_1 = 1'h0; // @[Fragmenter.scala:306:50]
wire _aFragnum_T_2 = 1'h0; // @[Fragmenter.scala:307:84]
wire [1:0] _limit_T_1 = 2'h3; // @[Fragmenter.scala:288:49]
wire [1:0] _limit_T_3 = 2'h3; // @[Fragmenter.scala:288:49]
wire [1:0] _limit_T_5 = 2'h3; // @[Fragmenter.scala:288:49]
wire [1:0] _limit_T_7 = 2'h3; // @[Fragmenter.scala:288:49]
wire [1:0] _limit_T_9 = 2'h3; // @[Fragmenter.scala:288:49]
wire [1:0] limit = 2'h3; // @[Fragmenter.scala:288:49]
wire _find_T_4 = 1'h1; // @[Parameters.scala:137:59]
wire find_0 = 1'h1; // @[Parameters.scala:616:12]
wire [21:0] _find_T_2 = 22'h0; // @[Parameters.scala:137:46]
wire [21:0] _find_T_3 = 22'h0; // @[Parameters.scala:137:46]
wire anonIn_a_ready; // @[MixedNode.scala:551:17]
wire anonIn_a_valid = auto_anon_in_a_valid_0; // @[Fragmenter.scala:92:9]
wire [2:0] anonIn_a_bits_opcode = auto_anon_in_a_bits_opcode_0; // @[Fragmenter.scala:92:9]
wire [2:0] anonIn_a_bits_param = auto_anon_in_a_bits_param_0; // @[Fragmenter.scala:92:9]
wire [2:0] anonIn_a_bits_size = auto_anon_in_a_bits_size_0; // @[Fragmenter.scala:92:9]
wire [9:0] anonIn_a_bits_source = auto_anon_in_a_bits_source_0; // @[Fragmenter.scala:92:9]
wire [20:0] anonIn_a_bits_address = auto_anon_in_a_bits_address_0; // @[Fragmenter.scala:92:9]
wire [7:0] anonIn_a_bits_mask = auto_anon_in_a_bits_mask_0; // @[Fragmenter.scala:92:9]
wire [63:0] anonIn_a_bits_data = auto_anon_in_a_bits_data_0; // @[Fragmenter.scala:92:9]
wire anonIn_a_bits_corrupt = auto_anon_in_a_bits_corrupt_0; // @[Fragmenter.scala:92:9]
wire anonIn_d_ready = auto_anon_in_d_ready_0; // @[Fragmenter.scala:92:9]
wire anonIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] anonIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [2:0] anonIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [9:0] anonIn_d_bits_source; // @[MixedNode.scala:551:17]
wire anonOut_a_ready = auto_anon_out_a_ready_0; // @[Fragmenter.scala:92:9]
wire anonOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] anonOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] anonOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [1:0] anonOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [13:0] anonOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [20:0] anonOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] anonOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] anonOut_a_bits_data; // @[MixedNode.scala:542:17]
wire anonOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire anonOut_d_ready; // @[MixedNode.scala:542:17]
wire anonOut_d_valid = auto_anon_out_d_valid_0; // @[Fragmenter.scala:92:9]
wire [2:0] anonOut_d_bits_opcode = auto_anon_out_d_bits_opcode_0; // @[Fragmenter.scala:92:9]
wire [1:0] anonOut_d_bits_size = auto_anon_out_d_bits_size_0; // @[Fragmenter.scala:92:9]
wire [13:0] anonOut_d_bits_source = auto_anon_out_d_bits_source_0; // @[Fragmenter.scala:92:9]
wire auto_anon_in_a_ready_0; // @[Fragmenter.scala:92:9]
wire [2:0] auto_anon_in_d_bits_opcode_0; // @[Fragmenter.scala:92:9]
wire [2:0] auto_anon_in_d_bits_size_0; // @[Fragmenter.scala:92:9]
wire [9:0] auto_anon_in_d_bits_source_0; // @[Fragmenter.scala:92:9]
wire auto_anon_in_d_valid_0; // @[Fragmenter.scala:92:9]
wire [2:0] auto_anon_out_a_bits_opcode_0; // @[Fragmenter.scala:92:9]
wire [2:0] auto_anon_out_a_bits_param_0; // @[Fragmenter.scala:92:9]
wire [1:0] auto_anon_out_a_bits_size_0; // @[Fragmenter.scala:92:9]
wire [13:0] auto_anon_out_a_bits_source_0; // @[Fragmenter.scala:92:9]
wire [20:0] auto_anon_out_a_bits_address_0; // @[Fragmenter.scala:92:9]
wire [7:0] auto_anon_out_a_bits_mask_0; // @[Fragmenter.scala:92:9]
wire [63:0] auto_anon_out_a_bits_data_0; // @[Fragmenter.scala:92:9]
wire auto_anon_out_a_bits_corrupt_0; // @[Fragmenter.scala:92:9]
wire auto_anon_out_a_valid_0; // @[Fragmenter.scala:92:9]
wire auto_anon_out_d_ready_0; // @[Fragmenter.scala:92:9]
assign auto_anon_in_a_ready_0 = anonIn_a_ready; // @[Fragmenter.scala:92:9]
assign anonOut_a_bits_data = anonIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
wire _anonIn_d_valid_T_1; // @[Fragmenter.scala:236:36]
assign auto_anon_in_d_valid_0 = anonIn_d_valid; // @[Fragmenter.scala:92:9]
assign auto_anon_in_d_bits_opcode_0 = anonIn_d_bits_opcode; // @[Fragmenter.scala:92:9]
wire [2:0] _anonIn_d_bits_size_T; // @[Fragmenter.scala:239:32]
assign auto_anon_in_d_bits_size_0 = anonIn_d_bits_size; // @[Fragmenter.scala:92:9]
wire [9:0] _anonIn_d_bits_source_T; // @[Fragmenter.scala:238:47]
assign auto_anon_in_d_bits_source_0 = anonIn_d_bits_source; // @[Fragmenter.scala:92:9]
assign auto_anon_out_a_valid_0 = anonOut_a_valid; // @[Fragmenter.scala:92:9]
assign auto_anon_out_a_bits_opcode_0 = anonOut_a_bits_opcode; // @[Fragmenter.scala:92:9]
assign auto_anon_out_a_bits_param_0 = anonOut_a_bits_param; // @[Fragmenter.scala:92:9]
assign auto_anon_out_a_bits_size_0 = anonOut_a_bits_size; // @[Fragmenter.scala:92:9]
wire [13:0] _anonOut_a_bits_source_T; // @[Fragmenter.scala:317:33]
assign auto_anon_out_a_bits_source_0 = anonOut_a_bits_source; // @[Fragmenter.scala:92:9]
wire [20:0] _anonOut_a_bits_address_T_6; // @[Fragmenter.scala:316:49]
assign auto_anon_out_a_bits_address_0 = anonOut_a_bits_address; // @[Fragmenter.scala:92:9]
wire [7:0] _anonOut_a_bits_mask_T; // @[Fragmenter.scala:325:31]
assign auto_anon_out_a_bits_mask_0 = anonOut_a_bits_mask; // @[Fragmenter.scala:92:9]
assign auto_anon_out_a_bits_data_0 = anonOut_a_bits_data; // @[Fragmenter.scala:92:9]
assign auto_anon_out_a_bits_corrupt_0 = anonOut_a_bits_corrupt; // @[Fragmenter.scala:92:9]
wire _anonOut_d_ready_T; // @[Fragmenter.scala:235:35]
assign auto_anon_out_d_ready_0 = anonOut_d_ready; // @[Fragmenter.scala:92:9]
assign anonIn_d_bits_opcode = anonOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
wire [1:0] dsizeOH_shiftAmount = anonOut_d_bits_size; // @[OneHot.scala:64:49]
reg [2:0] acknum; // @[Fragmenter.scala:201:29]
reg [2:0] dOrig; // @[Fragmenter.scala:202:24]
reg dToggle; // @[Fragmenter.scala:203:30]
wire [2:0] dFragnum = anonOut_d_bits_source[2:0]; // @[Fragmenter.scala:204:41]
wire [2:0] acknum_fragment = dFragnum; // @[Fragmenter.scala:204:41, :212:40]
wire dFirst = acknum == 3'h0; // @[Fragmenter.scala:201:29, :205:29]
wire dLast = dFragnum == 3'h0; // @[Fragmenter.scala:204:41, :206:30]
wire _drop_T_1 = dLast; // @[Fragmenter.scala:206:30, :234:37]
wire [3:0] _dsizeOH_T = 4'h1 << dsizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [3:0] dsizeOH = _dsizeOH_T; // @[OneHot.scala:65:{12,27}]
wire [5:0] _dsizeOH1_T = 6'h7 << anonOut_d_bits_size; // @[package.scala:243:71]
wire [2:0] _dsizeOH1_T_1 = _dsizeOH1_T[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] dsizeOH1 = ~_dsizeOH1_T_1; // @[package.scala:243:{46,76}]
wire dHasData = anonOut_d_bits_opcode[0]; // @[Edges.scala:106:36]
wire [2:0] dFirst_acknum = acknum_fragment; // @[Fragmenter.scala:212:40, :215:45]
wire _ack_decrement_T = dsizeOH[3]; // @[OneHot.scala:65:27]
wire ack_decrement = dHasData | _ack_decrement_T; // @[Fragmenter.scala:216:{32,56}]
wire [5:0] _dFirst_size_T = {dFragnum, 3'h0}; // @[Fragmenter.scala:204:41, :218:47]
wire [5:0] _dFirst_size_T_1 = {_dFirst_size_T[5:3], _dFirst_size_T[2:0] | dsizeOH1}; // @[package.scala:243:46]
wire [6:0] _dFirst_size_T_2 = {_dFirst_size_T_1, 1'h0}; // @[package.scala:241:35]
wire [6:0] _dFirst_size_T_3 = {_dFirst_size_T_2[6:1], 1'h1}; // @[package.scala:241:{35,40}]
wire [6:0] _dFirst_size_T_4 = {1'h0, _dFirst_size_T_1}; // @[package.scala:241:53]
wire [6:0] _dFirst_size_T_5 = ~_dFirst_size_T_4; // @[package.scala:241:{49,53}]
wire [6:0] _dFirst_size_T_6 = _dFirst_size_T_3 & _dFirst_size_T_5; // @[package.scala:241:{40,47,49}]
wire [2:0] dFirst_size_hi = _dFirst_size_T_6[6:4]; // @[OneHot.scala:30:18]
wire [3:0] dFirst_size_lo = _dFirst_size_T_6[3:0]; // @[OneHot.scala:31:18]
wire _dFirst_size_T_7 = |dFirst_size_hi; // @[OneHot.scala:30:18, :32:14]
wire [3:0] _dFirst_size_T_8 = {1'h0, dFirst_size_hi} | dFirst_size_lo; // @[OneHot.scala:30:18, :31:18, :32:28]
wire [1:0] dFirst_size_hi_1 = _dFirst_size_T_8[3:2]; // @[OneHot.scala:30:18, :32:28]
wire [1:0] dFirst_size_lo_1 = _dFirst_size_T_8[1:0]; // @[OneHot.scala:31:18, :32:28]
wire _dFirst_size_T_9 = |dFirst_size_hi_1; // @[OneHot.scala:30:18, :32:14]
wire [1:0] _dFirst_size_T_10 = dFirst_size_hi_1 | dFirst_size_lo_1; // @[OneHot.scala:30:18, :31:18, :32:28]
wire _dFirst_size_T_11 = _dFirst_size_T_10[1]; // @[OneHot.scala:32:28]
wire [1:0] _dFirst_size_T_12 = {_dFirst_size_T_9, _dFirst_size_T_11}; // @[OneHot.scala:32:{10,14}]
wire [2:0] dFirst_size = {_dFirst_size_T_7, _dFirst_size_T_12}; // @[OneHot.scala:32:{10,14}]
wire [3:0] _acknum_T = {1'h0, acknum} - {3'h0, ack_decrement}; // @[Fragmenter.scala:201:29, :216:32, :221:55]
wire [2:0] _acknum_T_1 = _acknum_T[2:0]; // @[Fragmenter.scala:221:55]
wire [2:0] _acknum_T_2 = dFirst ? dFirst_acknum : _acknum_T_1; // @[Fragmenter.scala:205:29, :215:45, :221:{24,55}]
wire _dToggle_T = anonOut_d_bits_source[3]; // @[Fragmenter.scala:224:41]
wire _drop_T = ~dHasData; // @[Fragmenter.scala:234:20]
wire _drop_T_2 = ~_drop_T_1; // @[Fragmenter.scala:234:{33,37}]
wire drop = _drop_T & _drop_T_2; // @[Fragmenter.scala:234:{20,30,33}]
assign _anonOut_d_ready_T = anonIn_d_ready | drop; // @[Fragmenter.scala:234:30, :235:35]
assign anonOut_d_ready = _anonOut_d_ready_T; // @[Fragmenter.scala:235:35]
wire _anonIn_d_valid_T = ~drop; // @[Fragmenter.scala:234:30, :236:39]
assign _anonIn_d_valid_T_1 = anonOut_d_valid & _anonIn_d_valid_T; // @[Fragmenter.scala:236:{36,39}]
assign anonIn_d_valid = _anonIn_d_valid_T_1; // @[Fragmenter.scala:236:36]
assign _anonIn_d_bits_source_T = anonOut_d_bits_source[13:4]; // @[Fragmenter.scala:238:47]
assign anonIn_d_bits_source = _anonIn_d_bits_source_T; // @[Fragmenter.scala:238:47]
assign _anonIn_d_bits_size_T = dFirst ? dFirst_size : dOrig; // @[OneHot.scala:32:10]
assign anonIn_d_bits_size = _anonIn_d_bits_size_T; // @[Fragmenter.scala:239:32]
wire [20:0] _find_T; // @[Parameters.scala:137:31]
wire [21:0] _find_T_1 = {1'h0, _find_T}; // @[Parameters.scala:137:{31,41}]
wire _limit_T = _repeater_io_deq_bits_opcode == 3'h0; // @[Fragmenter.scala:274:30, :288:49]
wire _limit_T_2 = _repeater_io_deq_bits_opcode == 3'h1; // @[Fragmenter.scala:274:30, :288:49]
wire _limit_T_4 = _repeater_io_deq_bits_opcode == 3'h2; // @[Fragmenter.scala:274:30, :288:49]
wire _limit_T_6 = _repeater_io_deq_bits_opcode == 3'h3; // @[Fragmenter.scala:274:30, :288:49]
wire _limit_T_8 = _repeater_io_deq_bits_opcode == 3'h4; // @[Fragmenter.scala:274:30, :288:49]
wire _limit_T_10 = _repeater_io_deq_bits_opcode == 3'h5; // @[Fragmenter.scala:274:30, :288:49]
wire _aFrag_T = _repeater_io_deq_bits_size[2]; // @[Fragmenter.scala:274:30, :297:31]
wire [2:0] aFrag = _aFrag_T ? 3'h3 : _repeater_io_deq_bits_size; // @[Fragmenter.scala:274:30, :297:{24,31}]
wire [12:0] _aOrigOH1_T = 13'h3F << _repeater_io_deq_bits_size; // @[package.scala:243:71]
wire [5:0] _aOrigOH1_T_1 = _aOrigOH1_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] aOrigOH1 = ~_aOrigOH1_T_1; // @[package.scala:243:{46,76}]
wire [9:0] _aFragOH1_T = 10'h7 << aFrag; // @[package.scala:243:71]
wire [2:0] _aFragOH1_T_1 = _aFragOH1_T[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] aFragOH1 = ~_aFragOH1_T_1; // @[package.scala:243:{46,76}]
wire _aHasData_opdata_T = _repeater_io_deq_bits_opcode[2]; // @[Fragmenter.scala:274:30]
wire aHasData = ~_aHasData_opdata_T; // @[Edges.scala:92:{28,37}]
wire [2:0] aMask = aHasData ? 3'h0 : aFragOH1; // @[package.scala:243:46]
reg [2:0] gennum; // @[Fragmenter.scala:303:29]
wire aFirst = gennum == 3'h0; // @[Fragmenter.scala:303:29, :304:29]
wire [2:0] _old_gennum1_T = aOrigOH1[5:3]; // @[package.scala:243:46]
wire [3:0] _old_gennum1_T_1 = {1'h0, gennum} - 4'h1; // @[Fragmenter.scala:303:29, :305:79]
wire [2:0] _old_gennum1_T_2 = _old_gennum1_T_1[2:0]; // @[Fragmenter.scala:305:79]
wire [2:0] old_gennum1 = aFirst ? _old_gennum1_T : _old_gennum1_T_2; // @[Fragmenter.scala:304:29, :305:{30,48,79}]
wire [2:0] _aFragnum_T = old_gennum1; // @[Fragmenter.scala:305:30, :307:40]
wire [2:0] _new_gennum_T = ~old_gennum1; // @[Fragmenter.scala:305:30, :306:28]
wire [2:0] _new_gennum_T_2 = _new_gennum_T; // @[Fragmenter.scala:306:{28,41}]
wire [2:0] new_gennum = ~_new_gennum_T_2; // @[Fragmenter.scala:306:{26,41}]
wire [2:0] _aFragnum_T_1 = ~_aFragnum_T; // @[Fragmenter.scala:307:{26,40}]
wire [2:0] _aFragnum_T_3 = _aFragnum_T_1; // @[Fragmenter.scala:307:{26,72}]
wire [2:0] aFragnum = ~_aFragnum_T_3; // @[Fragmenter.scala:307:{24,72}]
wire aLast = ~(|aFragnum); // @[Fragmenter.scala:307:24, :308:30]
reg aToggle_r; // @[Fragmenter.scala:309:54]
wire _aToggle_T = aFirst ? dToggle : aToggle_r; // @[Fragmenter.scala:203:30, :304:29, :309:{27,54}]
wire aToggle = ~_aToggle_T; // @[Fragmenter.scala:309:{23,27}]
wire _repeater_io_repeat_T = ~aHasData; // @[Fragmenter.scala:314:31]
wire _repeater_io_repeat_T_1 = |aFragnum; // @[Fragmenter.scala:307:24, :308:30, :314:53]
wire _repeater_io_repeat_T_2 = _repeater_io_repeat_T & _repeater_io_repeat_T_1; // @[Fragmenter.scala:314:{31,41,53}]
wire [5:0] _anonOut_a_bits_address_T = {old_gennum1, 3'h0}; // @[Fragmenter.scala:305:30, :316:65]
wire [5:0] _anonOut_a_bits_address_T_1 = ~aOrigOH1; // @[package.scala:243:46]
wire [5:0] _anonOut_a_bits_address_T_2 = _anonOut_a_bits_address_T | _anonOut_a_bits_address_T_1; // @[Fragmenter.scala:316:{65,88,90}]
wire [5:0] _anonOut_a_bits_address_T_3 = {_anonOut_a_bits_address_T_2[5:3], _anonOut_a_bits_address_T_2[2:0] | aFragOH1}; // @[package.scala:243:46]
wire [5:0] _anonOut_a_bits_address_T_4 = {_anonOut_a_bits_address_T_3[5:3], 3'h7}; // @[Fragmenter.scala:316:{100,111}]
wire [5:0] _anonOut_a_bits_address_T_5 = ~_anonOut_a_bits_address_T_4; // @[Fragmenter.scala:316:{51,111}]
assign _anonOut_a_bits_address_T_6 = {_repeater_io_deq_bits_address[20:6], _repeater_io_deq_bits_address[5:0] | _anonOut_a_bits_address_T_5}; // @[Fragmenter.scala:274:30, :316:{49,51}]
assign anonOut_a_bits_address = _anonOut_a_bits_address_T_6; // @[Fragmenter.scala:316:49]
wire [10:0] anonOut_a_bits_source_hi = {_repeater_io_deq_bits_source, aToggle}; // @[Fragmenter.scala:274:30, :309:23, :317:33]
assign _anonOut_a_bits_source_T = {anonOut_a_bits_source_hi, aFragnum}; // @[Fragmenter.scala:307:24, :317:33]
assign anonOut_a_bits_source = _anonOut_a_bits_source_T; // @[Fragmenter.scala:317:33]
assign anonOut_a_bits_size = aFrag[1:0]; // @[Fragmenter.scala:297:24, :318:25] |
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
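// Illustrative sketch: for a hypothetical x = Seq(a, b, c, d),
//   x.rotate(1) == Seq(b, c, d, a) and x.rotateRight(1) == Seq(d, a, b, c);
// the UInt-amount variants additionally require x.size to be a power of two.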
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
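// Illustrative sketch (assumed names): with val mem = SyncReadMem(1024, UInt(8.W)),
// mem.readAndHold(addr, ren) presents the read data the cycle after ren is asserted
// and holds that value until the next enabled read.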
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
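// Illustrative sketch: "TileResetSetter".underscore == "tile_reset_setter" and
// "Tile Reset_Setter".kebab == "tile-reset-setter".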
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
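// Illustrative sketch: for x = "b1111".U (4 bits),
// x.sextTo(8) == "b11111111".U while x.padTo(8) == "b00001111".U.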
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
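// Illustrative sketch (power-of-two case): 6.U.addWrap(5.U, 8) == 3.U and
// 2.U.subWrap(5.U, 8) == 5.U; both assume the operands are already < n.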
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
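// Illustrative sketch: UIntToOH1(2.U, 4) == "b0011".U (a one-hot-minus-one mask),
// and OH1ToOH("b0011".U) == "b0100".U recovers the ordinary one-hot encoding.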
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
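// Illustrative sketch: leftOR("b00100".U) == "b11100".U (smears the set bit toward
// the high end), while rightOR("b00100".U) == "b00111".U (smears it toward bit 0).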
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy, except this returns a Seq instead of a Map.
* Useful for deterministic code generation.
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
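// Illustrative sketch: groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2)
//   == Seq(1 -> Seq(1, 3), 0 -> Seq(2, 4)), preserving first-seen key order.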
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
File ToAXI4.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.amba.{AMBACorrupt, AMBACorruptField, AMBAProt, AMBAProtField}
import freechips.rocketchip.amba.axi4.{AXI4BundleARW, AXI4MasterParameters, AXI4MasterPortParameters, AXI4Parameters, AXI4Imp}
import freechips.rocketchip.diplomacy.{IdMap, IdMapEntry, IdRange}
import freechips.rocketchip.util.{BundleField, ControlKey, ElaborationArtefacts, UIntToOH1}
import freechips.rocketchip.util.DataToAugmentedData
class AXI4TLStateBundle(val sourceBits: Int) extends Bundle {
val size = UInt(4.W)
val source = UInt((sourceBits max 1).W)
}
case object AXI4TLState extends ControlKey[AXI4TLStateBundle]("tl_state")
case class AXI4TLStateField(sourceBits: Int) extends BundleField[AXI4TLStateBundle](AXI4TLState, Output(new AXI4TLStateBundle(sourceBits)), x => {
x.size := 0.U
x.source := 0.U
})
/** TLtoAXI4IdMap serves as a record for the translation performed between id spaces.
*
* Its member [axi4Masters] is used as the new AXI4MasterParameters in diplomacy.
* Its member [mapping] is used as the template for the circuit generated in TLToAXI4Node.module.
*/
class TLtoAXI4IdMap(tlPort: TLMasterPortParameters) extends IdMap[TLToAXI4IdMapEntry]
{
val tlMasters = tlPort.masters.sortBy(_.sourceId).sortWith(TLToAXI4.sortByType)
private val axi4IdSize = tlMasters.map { tl => if (tl.requestFifo) 1 else tl.sourceId.size }
private val axi4IdStart = axi4IdSize.scanLeft(0)(_+_).init
val axi4Masters = axi4IdStart.zip(axi4IdSize).zip(tlMasters).map { case ((start, size), tl) =>
AXI4MasterParameters(
name = tl.name,
id = IdRange(start, start+size),
aligned = true,
maxFlight = Some(if (tl.requestFifo) tl.sourceId.size else 1),
nodePath = tl.nodePath)
}
private val axi4IdEnd = axi4Masters.map(_.id.end).max
private val axiDigits = String.valueOf(axi4IdEnd-1).length()
private val tlDigits = String.valueOf(tlPort.endSourceId-1).length()
protected val fmt = s"\t[%${axiDigits}d, %${axiDigits}d) <= [%${tlDigits}d, %${tlDigits}d) %s%s%s"
val mapping: Seq[TLToAXI4IdMapEntry] = tlMasters.zip(axi4Masters).map { case (tl, axi) =>
TLToAXI4IdMapEntry(axi.id, tl.sourceId, tl.name, tl.supports.probe, tl.requestFifo)
}
}
case class TLToAXI4IdMapEntry(axi4Id: IdRange, tlId: IdRange, name: String, isCache: Boolean, requestFifo: Boolean)
extends IdMapEntry
{
val from = tlId
val to = axi4Id
val maxTransactionsInFlight = Some(tlId.size)
}
case class TLToAXI4Node(wcorrupt: Boolean = true)(implicit valName: ValName) extends MixedAdapterNode(TLImp, AXI4Imp)(
dFn = { p =>
AXI4MasterPortParameters(
masters = (new TLtoAXI4IdMap(p)).axi4Masters,
requestFields = (if (wcorrupt) Seq(AMBACorruptField()) else Seq()) ++ p.requestFields.filter(!_.isInstanceOf[AMBAProtField]),
echoFields = AXI4TLStateField(log2Ceil(p.endSourceId)) +: p.echoFields,
responseKeys = p.responseKeys)
},
uFn = { p => TLSlavePortParameters.v1(
managers = p.slaves.map { case s =>
TLSlaveParameters.v1(
address = s.address,
resources = s.resources,
regionType = s.regionType,
executable = s.executable,
nodePath = s.nodePath,
supportsGet = s.supportsRead,
supportsPutFull = s.supportsWrite,
supportsPutPartial = s.supportsWrite,
fifoId = Some(0),
mayDenyPut = true,
mayDenyGet = true)},
beatBytes = p.beatBytes,
minLatency = p.minLatency,
responseFields = p.responseFields,
requestKeys = AMBAProt +: p.requestKeys)
})
// wcorrupt alone is not enough; a slave must include AMBACorrupt in the slave port's requestKeys
class TLToAXI4(val combinational: Boolean = true, val adapterName: Option[String] = None, val stripBits: Int = 0, val wcorrupt: Boolean = true)(implicit p: Parameters) extends LazyModule
{
require(stripBits == 0, "stripBits > 0 is no longer supported on TLToAXI4")
val node = TLToAXI4Node(wcorrupt)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
val slaves = edgeOut.slave.slaves
// All pairs of slaves must promise that they will never interleave data
require (slaves(0).interleavedId.isDefined)
slaves.foreach { s => require (s.interleavedId == slaves(0).interleavedId) }
// Construct the source=>ID mapping table
val map = new TLtoAXI4IdMap(edgeIn.client)
val sourceStall = WireDefault(VecInit.fill(edgeIn.client.endSourceId)(false.B))
val sourceTable = WireDefault(VecInit.fill(edgeIn.client.endSourceId)(0.U.asTypeOf(out.aw.bits.id)))
val idStall = WireDefault(VecInit.fill(edgeOut.master.endId)(false.B))
var idCount = Array.fill(edgeOut.master.endId) { None:Option[Int] }
map.mapping.foreach { case TLToAXI4IdMapEntry(axi4Id, tlId, _, _, fifo) =>
for (i <- 0 until tlId.size) {
val id = axi4Id.start + (if (fifo) 0 else i)
sourceStall(tlId.start + i) := idStall(id)
sourceTable(tlId.start + i) := id.U
}
if (fifo) { idCount(axi4Id.start) = Some(tlId.size) }
}
adapterName.foreach { n =>
println(s"$n AXI4-ID <= TL-Source mapping:\n${map.pretty}\n")
ElaborationArtefacts.add(s"$n.axi4.json", s"""{"mapping":[${map.mapping.mkString(",")}]}""")
}
// We need to keep the following state from A => D: (size, source)
// All of those fields could potentially require 0 bits (argh. Chisel.)
// We will pack all of that extra information into the echo bits.
require (log2Ceil(edgeIn.maxLgSize+1) <= 4)
val a_address = edgeIn.address(in.a.bits)
val a_source = in.a.bits.source
val a_size = edgeIn.size(in.a.bits)
val a_isPut = edgeIn.hasData(in.a.bits)
val (a_first, a_last, _) = edgeIn.firstlast(in.a)
val r_state = out.r.bits.echo(AXI4TLState)
val r_source = r_state.source
val r_size = r_state.size
val b_state = out.b.bits.echo(AXI4TLState)
val b_source = b_state.source
val b_size = b_state.size
// We need these Queues because AXI4 queues are irrevocable
val depth = if (combinational) 1 else 2
val out_arw = Wire(Decoupled(new AXI4BundleARW(out.params)))
val out_w = Wire(chiselTypeOf(out.w))
out.w :<>= Queue.irrevocable(out_w, entries=depth, flow=combinational)
val queue_arw = Queue.irrevocable(out_arw, entries=depth, flow=combinational)
// Fan out the ARW channel to AR and AW
out.ar.bits := queue_arw.bits
out.aw.bits := queue_arw.bits
out.ar.valid := queue_arw.valid && !queue_arw.bits.wen
out.aw.valid := queue_arw.valid && queue_arw.bits.wen
queue_arw.ready := Mux(queue_arw.bits.wen, out.aw.ready, out.ar.ready)
val beatBytes = edgeIn.manager.beatBytes
val maxSize = log2Ceil(beatBytes).U
val doneAW = RegInit(false.B)
when (in.a.fire) { doneAW := !a_last }
val arw = out_arw.bits
arw.wen := a_isPut
arw.id := sourceTable(a_source)
arw.addr := a_address
arw.len := UIntToOH1(a_size, AXI4Parameters.lenBits + log2Ceil(beatBytes)) >> log2Ceil(beatBytes)
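// Illustrative numbers (assumed, not from this design): with beatBytes = 8 and
// a_size = 5 (a 32-byte transfer), UIntToOH1 yields 31 and len = 31 >> 3 = 3,
// i.e. an AXI burst of 4 beats of 8 bytes each.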
arw.size := Mux(a_size >= maxSize, maxSize, a_size)
arw.burst := AXI4Parameters.BURST_INCR
arw.lock := 0.U // not exclusive (LR/SC unsupported b/c no forward progress guarantee)
arw.cache := 0.U // do not allow AXI to modify our transactions
arw.prot := AXI4Parameters.PROT_PRIVILEGED
arw.qos := 0.U // no QoS
Connectable.waiveUnmatched(arw.user, in.a.bits.user) match {
case (lhs, rhs) => lhs :<= rhs
}
Connectable.waiveUnmatched(arw.echo, in.a.bits.echo) match {
case (lhs, rhs) => lhs :<= rhs
}
val a_extra = arw.echo(AXI4TLState)
a_extra.source := a_source
a_extra.size := a_size
in.a.bits.user.lift(AMBAProt).foreach { x =>
val prot = Wire(Vec(3, Bool()))
val cache = Wire(Vec(4, Bool()))
prot(0) := x.privileged
prot(1) := !x.secure
prot(2) := x.fetch
cache(0) := x.bufferable
cache(1) := x.modifiable
cache(2) := x.readalloc
cache(3) := x.writealloc
arw.prot := Cat(prot.reverse)
arw.cache := Cat(cache.reverse)
}
val stall = sourceStall(in.a.bits.source) && a_first
in.a.ready := !stall && Mux(a_isPut, (doneAW || out_arw.ready) && out_w.ready, out_arw.ready)
out_arw.valid := !stall && in.a.valid && Mux(a_isPut, !doneAW && out_w.ready, true.B)
out_w.valid := !stall && in.a.valid && a_isPut && (doneAW || out_arw.ready)
out_w.bits.data := in.a.bits.data
out_w.bits.strb := in.a.bits.mask
out_w.bits.last := a_last
out_w.bits.user.lift(AMBACorrupt).foreach { _ := in.a.bits.corrupt }
// R and B => D arbitration
val r_holds_d = RegInit(false.B)
when (out.r.fire) { r_holds_d := !out.r.bits.last }
// Give R higher priority than B, unless B has been delayed for 8 cycles
val b_delay = Reg(UInt(3.W))
when (out.b.valid && !out.b.ready) {
b_delay := b_delay + 1.U
} .otherwise {
b_delay := 0.U
}
val r_wins = (out.r.valid && b_delay =/= 7.U) || r_holds_d
out.r.ready := in.d.ready && r_wins
out.b.ready := in.d.ready && !r_wins
in.d.valid := Mux(r_wins, out.r.valid, out.b.valid)
// If the first beat of the AXI RRESP is RESP_DECERR, treat this as a denied
// request. We must pulse extend this value as AXI is allowed to change the
// value of RRESP on every beat, and ChipLink may not.
val r_first = RegInit(true.B)
when (out.r.fire) { r_first := out.r.bits.last }
val r_denied = out.r.bits.resp === AXI4Parameters.RESP_DECERR holdUnless r_first
val r_corrupt = out.r.bits.resp =/= AXI4Parameters.RESP_OKAY
val b_denied = out.b.bits.resp =/= AXI4Parameters.RESP_OKAY
val r_d = edgeIn.AccessAck(r_source, r_size, 0.U, denied = r_denied, corrupt = r_corrupt || r_denied)
val b_d = edgeIn.AccessAck(b_source, b_size, denied = b_denied)
Connectable.waiveUnmatched(r_d.user, out.r.bits.user) match {
case (lhs, rhs) => lhs.squeezeAll :<= rhs.squeezeAll
}
Connectable.waiveUnmatched(r_d.echo, out.r.bits.echo) match {
case (lhs, rhs) => lhs.squeezeAll :<= rhs.squeezeAll
}
Connectable.waiveUnmatched(b_d.user, out.b.bits.user) match {
case (lhs, rhs) => lhs.squeezeAll :<= rhs.squeezeAll
}
Connectable.waiveUnmatched(b_d.echo, out.b.bits.echo) match {
case (lhs, rhs) => lhs.squeezeAll :<= rhs.squeezeAll
}
in.d.bits := Mux(r_wins, r_d, b_d)
in.d.bits.data := out.r.bits.data // avoid a costly Mux
// We need to track if any reads or writes are inflight for a given ID.
// If the opposite type arrives, we must stall until it completes.
val a_sel = UIntToOH(arw.id, edgeOut.master.endId).asBools
val d_sel = UIntToOH(Mux(r_wins, out.r.bits.id, out.b.bits.id), edgeOut.master.endId).asBools
val d_last = Mux(r_wins, out.r.bits.last, true.B)
// If FIFO was requested, ensure that R+W ordering is preserved
(a_sel zip d_sel zip idStall zip idCount) foreach { case (((as, ds), s), n) =>
// AXI does not guarantee read vs. write ordering. In particular, if we
// are in the middle of receiving a read burst and then issue a write,
// the write might affect the read burst. This violates FIFO behaviour.
// To solve this, we must wait until the last beat of a burst, but this
// means that a TileLink master which performs early source reuse can
// have one more transaction inflight than we promised AXI; stall it too.
val maxCount = n.getOrElse(1)
val count = RegInit(0.U(log2Ceil(maxCount + 1).W))
val write = Reg(Bool())
val idle = count === 0.U
val inc = as && out_arw.fire
val dec = ds && d_last && in.d.fire
count := count + inc.asUInt - dec.asUInt
assert (!dec || count =/= 0.U) // underflow
assert (!inc || count =/= maxCount.U) // overflow
when (inc) { write := arw.wen }
// If only one transaction can be inflight, it can't mismatch
val mismatch = if (maxCount > 1) { write =/= arw.wen } else { false.B }
s := (!idle && mismatch) || (count === maxCount.U)
}
// Tie off unused channels
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
}
}
}
object TLToAXI4
{
def apply(combinational: Boolean = true, adapterName: Option[String] = None, stripBits: Int = 0, wcorrupt: Boolean = true)(implicit p: Parameters) =
{
val tl2axi4 = LazyModule(new TLToAXI4(combinational, adapterName, stripBits, wcorrupt))
tl2axi4.node
}
def sortByType(a: TLMasterParameters, b: TLMasterParameters): Boolean = {
if ( a.supports.probe && !b.supports.probe) return false
if (!a.supports.probe && b.supports.probe) return true
if ( a.requestFifo && !b.requestFifo ) return false
if (!a.requestFifo && b.requestFifo ) return true
return false
}
}
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and any unconnected [[Dangle]]s from this module and
* its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. Recursively append [[wrapper.children]] into [[chisel3.internal.Builder]].
// 2. Return the [[Dangle]]s from each child module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
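// A minimal usage sketch (hypothetical `MyAdapter`, not part of this file): a LazyModule supplies its
// hardware by instantiating a LazyModuleImp as its `module` member:
//   class MyAdapter(implicit p: Parameters) extends LazyModule {
//     lazy val module = new LazyModuleImp(this) {
//       val io = IO(new Bundle { val out = Output(UInt(8.W)) })
//       io.out := 0.U
//     }
//   }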
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
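  // Usage sketch (hypothetical module body, assuming an externally supplied clock and reset):
  //   lazy val module = new LazyRawModuleImp(this) {
  //     override def provideImplicitClockToLazyChildren = true
  //     val clockIn = IO(Input(Clock()))
  //     val resetIn = IO(Input(Bool()))
  //     childClock := clockIn
  //     childReset := resetIn
  //   }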
}
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
 * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]],
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
 * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all node are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
 *   Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
 *   Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
 *   the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
 *   Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
 *   It should extend from [[chisel3.Data]], which represents the real hardware.
* @tparam DO
 *   Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
 *   describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
 *   Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
 *   Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
 *   interface. It should extend from [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
 *   - line `─`: the source is processed by a function and the result is passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
  /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
    * connections which need to be resolved in some way to determine how many actual edges they correspond to. We also
    * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
* edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
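  // Worked example (sketch): if oBindings were [BIND_ONCE, BIND_STAR] and the resolved oStar were 2,
  // then oSum = [0, 1, 3], so oPortMapping = [(0, 1), (1, 3)]: the BIND_ONCE operator owns edge 0 and
  // the BIND_STAR operator owns edges 1 and 2.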
  /** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
    * Each element is: `j` Port index of this binding in the Node's [[iPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  /** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
    * If you need access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
    // TODO: Mark unconnected forwarded diplomatic signals as DontCare for compatibility reasons.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
    // TODO: Mark unconnected forwarded diplomatic signals as DontCare for compatibility reasons.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
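  // Sketch: `x := y` (this node on the left) records the binding on both sides; for example a
  // BIND_STAR binding pushed onto `y` is recorded there as BIND_QUERY, so star resolution can later
  // query the opposite node for its resolved cardinality.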
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
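  // Worked example (sketch): with beatBytes = 8, mask(address = 0x4.U, lgSize = 2.U) selects the four
  // bytes at offsets 4..7 within the beat, i.e. 0xF0.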
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
        // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
        // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
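  // Worked example (sketch): with beatBytes = 8, a PutFullData of size = 5 (32 bytes) occupies
  // numBeats = 4 beats and numBeats1 = 3, while a Get of the same size is a single request beat
  // (numBeats = 1, numBeats1 = 0) because the A channel carries no data for it.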
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
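  // Usage sketch: the firstlast/count helpers track multi-beat messages on a channel, e.g.
  //   val (a_first, a_last, a_done) = edge.firstlast(tl.a)
  // where `edge` is a TLEdge and `tl` a TLBundle; a_done pulses on the cycle the final beat fires.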
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
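  // Usage sketch: constructors return a (legal, bits) pair; a client typically drives the A channel as
  //   val (legal, get) = edge.Get(fromSource = 0.U, toAddress = addr, lgSize = 3.U)
  //   tl.a.valid := wantRead && legal
  //   tl.a.bits  := get
  // where `edge`, `tl`, `addr`, and `wantRead` are hypothetical values from the surrounding client logic.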
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
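  // AccessAck with data: construct a D-channel AccessAckData response carrying
  // the read payload (the response to Get and atomic requests).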
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
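  // HintAck: construct a D-channel acknowledgement for Hint requests.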
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
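// --- Illustrative only; not part of the original sources. ---
// A minimal sketch of how the D-channel helpers above might be used by a
// manager-side module to acknowledge A-channel requests. The node name
// `node` and the surrounding LazyModule plumbing are assumed, not shown here.
//
//   val (in, edge) = node.in(0)
//   in.a.ready := in.d.ready
//   in.d.valid := in.a.valid
//   in.d.bits  := Mux(in.a.bits.opcode === TLMessages.Get,
//     edge.AccessAck(in.a.bits, 0.U),   // AccessAckData with a placeholder read payload
//     edge.AccessAck(in.a.bits))        // dataless AccessAck for Put requests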
| module TLToAXI4_1( // @[ToAXI4.scala:103:9]
input clock, // @[ToAXI4.scala:103:9]
input reset, // @[ToAXI4.scala:103:9]
output auto_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_out_aw_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_aw_valid, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_aw_bits_id, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_aw_bits_addr, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_aw_bits_len, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_aw_bits_size, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_out_aw_bits_burst, // @[LazyModuleImp.scala:107:25]
output auto_out_aw_bits_lock, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_aw_bits_cache, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_aw_bits_prot, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_aw_bits_qos, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_aw_bits_echo_tl_state_size, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_aw_bits_echo_tl_state_source, // @[LazyModuleImp.scala:107:25]
input auto_out_w_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_w_valid, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_w_bits_data, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_w_bits_strb, // @[LazyModuleImp.scala:107:25]
output auto_out_w_bits_last, // @[LazyModuleImp.scala:107:25]
output auto_out_b_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_b_valid, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_out_b_bits_id, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_b_bits_resp, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_out_b_bits_echo_tl_state_size, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_out_b_bits_echo_tl_state_source, // @[LazyModuleImp.scala:107:25]
input auto_out_ar_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_ar_valid, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_ar_bits_id, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_ar_bits_addr, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_ar_bits_len, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_ar_bits_size, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_out_ar_bits_burst, // @[LazyModuleImp.scala:107:25]
output auto_out_ar_bits_lock, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_ar_bits_cache, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_ar_bits_prot, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_ar_bits_qos, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_ar_bits_echo_tl_state_size, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_ar_bits_echo_tl_state_source, // @[LazyModuleImp.scala:107:25]
output auto_out_r_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_r_valid, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_out_r_bits_id, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_r_bits_data, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_r_bits_resp, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_out_r_bits_echo_tl_state_size, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_out_r_bits_echo_tl_state_source, // @[LazyModuleImp.scala:107:25]
input auto_out_r_bits_last // @[LazyModuleImp.scala:107:25]
);
wire [7:0] out_arw_bits_id; // @[ToAXI4.scala:153:25]
wire auto_in_a_valid_0 = auto_in_a_valid; // @[ToAXI4.scala:103:9]
wire [2:0] auto_in_a_bits_opcode_0 = auto_in_a_bits_opcode; // @[ToAXI4.scala:103:9]
wire [2:0] auto_in_a_bits_param_0 = auto_in_a_bits_param; // @[ToAXI4.scala:103:9]
wire [2:0] auto_in_a_bits_size_0 = auto_in_a_bits_size; // @[ToAXI4.scala:103:9]
wire [7:0] auto_in_a_bits_source_0 = auto_in_a_bits_source; // @[ToAXI4.scala:103:9]
wire [31:0] auto_in_a_bits_address_0 = auto_in_a_bits_address; // @[ToAXI4.scala:103:9]
wire [7:0] auto_in_a_bits_mask_0 = auto_in_a_bits_mask; // @[ToAXI4.scala:103:9]
wire [63:0] auto_in_a_bits_data_0 = auto_in_a_bits_data; // @[ToAXI4.scala:103:9]
wire auto_in_a_bits_corrupt_0 = auto_in_a_bits_corrupt; // @[ToAXI4.scala:103:9]
wire auto_in_d_ready_0 = auto_in_d_ready; // @[ToAXI4.scala:103:9]
wire auto_out_aw_ready_0 = auto_out_aw_ready; // @[ToAXI4.scala:103:9]
wire auto_out_w_ready_0 = auto_out_w_ready; // @[ToAXI4.scala:103:9]
wire auto_out_b_valid_0 = auto_out_b_valid; // @[ToAXI4.scala:103:9]
wire [7:0] auto_out_b_bits_id_0 = auto_out_b_bits_id; // @[ToAXI4.scala:103:9]
wire [1:0] auto_out_b_bits_resp_0 = auto_out_b_bits_resp; // @[ToAXI4.scala:103:9]
wire [3:0] auto_out_b_bits_echo_tl_state_size_0 = auto_out_b_bits_echo_tl_state_size; // @[ToAXI4.scala:103:9]
wire [7:0] auto_out_b_bits_echo_tl_state_source_0 = auto_out_b_bits_echo_tl_state_source; // @[ToAXI4.scala:103:9]
wire auto_out_ar_ready_0 = auto_out_ar_ready; // @[ToAXI4.scala:103:9]
wire auto_out_r_valid_0 = auto_out_r_valid; // @[ToAXI4.scala:103:9]
wire [7:0] auto_out_r_bits_id_0 = auto_out_r_bits_id; // @[ToAXI4.scala:103:9]
wire [63:0] auto_out_r_bits_data_0 = auto_out_r_bits_data; // @[ToAXI4.scala:103:9]
wire [1:0] auto_out_r_bits_resp_0 = auto_out_r_bits_resp; // @[ToAXI4.scala:103:9]
wire [3:0] auto_out_r_bits_echo_tl_state_size_0 = auto_out_r_bits_echo_tl_state_size; // @[ToAXI4.scala:103:9]
wire [7:0] auto_out_r_bits_echo_tl_state_source_0 = auto_out_r_bits_echo_tl_state_source; // @[ToAXI4.scala:103:9]
wire auto_out_r_bits_last_0 = auto_out_r_bits_last; // @[ToAXI4.scala:103:9]
wire [255:0][7:0] _GEN = '{8'h0, 8'h0, 8'h0, 8'h0, 8'h0, 8'h0, 8'h0, 8'h0, 8'h0, 8'h0, 8'h0, 8'h0, 8'hF3, 8'hF2, 8'hF1, 8'hF0, 8'hEF, 8'hEE, 8'hED, 8'hEC, 8'hEB, 8'hEA, 8'hE9, 8'hE8, 8'hE7, 8'hE6, 8'hE5, 8'hE4, 8'hE3, 8'hE2, 8'hE1, 8'hE0, 8'hDF, 8'hDE, 8'hDD, 8'hDC, 8'hDB, 8'hDA, 8'hD9, 8'hD8, 8'hD7, 8'hD6, 8'hD5, 8'hD4, 8'hD3, 8'hD2, 8'hD1, 8'hD0, 8'hCF, 8'hCE, 8'hCD, 8'hCC, 8'hCB, 8'hCA, 8'hC9, 8'hC8, 8'hC7, 8'hC6, 8'hC5, 8'hC4, 8'hC3, 8'hC2, 8'hC1, 8'hC0, 8'hBF, 8'hBE, 8'hBD, 8'hBC, 8'hBB, 8'hBA, 8'hB9, 8'hB8, 8'hB7, 8'hB6, 8'hB5, 8'hB4, 8'hB3, 8'hB2, 8'hB1, 8'hB0, 8'hAF, 8'hAE, 8'hAD, 8'hAC, 8'hAB, 8'hAA, 8'hA9, 8'hA8, 8'hA7, 8'hA6, 8'hA5, 8'hA4, 8'hA3, 8'hA2, 8'hA1, 8'hA0, 8'h9F, 8'h9E, 8'h9D, 8'h9C, 8'h9B, 8'h9A, 8'h99, 8'h98, 8'h97, 8'h96, 8'h95, 8'h94, 8'h93, 8'h92, 8'h91, 8'h90, 8'h8F, 8'h8E, 8'h8D, 8'h8C, 8'h8B, 8'h8A, 8'h89, 8'h88, 8'h87, 8'h86, 8'h85, 8'h84, 8'h83, 8'h82, 8'h81, 8'h80, 8'h7F, 8'h7E, 8'h7D, 8'h7C, 8'h7B, 8'h7A, 8'h79, 8'h78, 8'h77, 8'h76, 8'h75, 8'h74, 8'h73, 8'h72, 8'h71, 8'h70, 8'h6F, 8'h6E, 8'h6D, 8'h6C, 8'h6B, 8'h6A, 8'h69, 8'h68, 8'h67, 8'h66, 8'h65, 8'h64, 8'h63, 8'h62, 8'h61, 8'h60, 8'h5F, 8'h5E, 8'h5D, 8'h5C, 8'h5B, 8'h5A, 8'h59, 8'h58, 8'h57, 8'h56, 8'h55, 8'h54, 8'h53, 8'h52, 8'h51, 8'h50, 8'h4F, 8'h4E, 8'h4D, 8'h4C, 8'h4B, 8'h4A, 8'h49, 8'h48, 8'h47, 8'h46, 8'h45, 8'h44, 8'h43, 8'h42, 8'h41, 8'h40, 8'h3F, 8'h3E, 8'h3D, 8'h3C, 8'h3B, 8'h3A, 8'h39, 8'h38, 8'h37, 8'h36, 8'h35, 8'h34, 8'h33, 8'h32, 8'h31, 8'h30, 8'h2F, 8'h2E, 8'h2D, 8'h2C, 8'h2B, 8'h2A, 8'h29, 8'h28, 8'h27, 8'h26, 8'h25, 8'h24, 8'h23, 8'h22, 8'h21, 8'h20, 8'h1F, 8'h1E, 8'h1D, 8'h1C, 8'h1B, 8'h1A, 8'h19, 8'h18, 8'h17, 8'h16, 8'h15, 8'h14, 8'h13, 8'h12, 8'h11, 8'h10, 8'hF, 8'hE, 8'hD, 8'hC, 8'hB, 8'hA, 8'h9, 8'h8, 8'h7, 8'h6, 8'h5, 8'h4, 8'h3, 8'h2, 8'h1, 8'h0};
wire [1:0] auto_in_d_bits_param = 2'h0; // @[ToAXI4.scala:103:9]
wire [1:0] nodeIn_d_bits_param = 2'h0; // @[MixedNode.scala:551:17]
wire [1:0] r_d_param = 2'h0; // @[Edges.scala:810:17]
wire [1:0] b_d_param = 2'h0; // @[Edges.scala:792:17]
wire [1:0] _nodeIn_d_bits_T_param = 2'h0; // @[ToAXI4.scala:255:23]
wire auto_in_d_bits_sink = 1'h0; // @[ToAXI4.scala:103:9]
wire nodeIn_d_bits_sink = 1'h0; // @[MixedNode.scala:551:17]
wire _sourceStall_WIRE_0 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_1 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_2 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_3 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_4 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_5 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_6 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_7 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_8 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_9 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_10 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_11 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_12 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_13 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_14 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_15 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_16 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_17 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_18 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_19 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_20 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_21 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_22 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_23 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_24 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_25 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_26 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_27 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_28 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_29 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_30 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_31 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_32 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_33 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_34 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_35 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_36 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_37 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_38 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_39 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_40 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_41 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_42 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_43 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_44 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_45 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_46 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_47 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_48 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_49 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_50 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_51 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_52 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_53 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_54 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_55 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_56 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_57 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_58 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_59 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_60 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_61 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_62 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_63 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_64 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_65 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_66 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_67 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_68 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_69 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_70 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_71 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_72 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_73 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_74 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_75 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_76 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_77 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_78 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_79 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_80 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_81 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_82 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_83 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_84 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_85 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_86 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_87 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_88 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_89 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_90 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_91 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_92 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_93 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_94 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_95 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_96 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_97 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_98 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_99 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_100 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_101 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_102 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_103 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_104 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_105 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_106 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_107 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_108 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_109 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_110 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_111 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_112 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_113 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_114 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_115 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_116 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_117 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_118 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_119 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_120 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_121 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_122 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_123 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_124 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_125 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_126 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_127 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_128 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_129 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_130 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_131 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_132 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_133 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_134 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_135 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_136 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_137 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_138 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_139 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_140 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_141 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_142 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_143 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_144 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_145 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_146 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_147 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_148 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_149 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_150 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_151 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_152 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_153 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_154 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_155 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_156 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_157 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_158 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_159 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_160 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_161 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_162 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_163 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_164 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_165 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_166 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_167 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_168 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_169 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_170 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_171 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_172 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_173 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_174 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_175 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_176 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_177 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_178 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_179 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_180 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_181 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_182 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_183 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_184 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_185 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_186 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_187 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_188 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_189 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_190 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_191 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_192 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_193 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_194 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_195 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_196 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_197 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_198 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_199 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_200 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_201 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_202 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_203 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_204 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_205 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_206 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_207 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_208 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_209 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_210 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_211 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_212 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_213 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_214 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_215 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_216 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_217 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_218 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_219 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_220 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_221 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_222 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_223 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_224 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_225 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_226 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_227 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_228 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_229 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_230 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_231 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_232 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_233 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_234 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_235 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_236 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_237 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_238 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_239 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_240 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_241 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_242 = 1'h0; // @[ToAXI4.scala:113:76]
wire _sourceStall_WIRE_243 = 1'h0; // @[ToAXI4.scala:113:76]
wire _idStall_WIRE_0 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_1 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_2 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_3 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_4 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_5 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_6 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_7 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_8 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_9 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_10 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_11 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_12 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_13 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_14 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_15 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_16 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_17 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_18 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_19 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_20 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_21 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_22 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_23 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_24 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_25 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_26 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_27 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_28 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_29 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_30 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_31 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_32 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_33 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_34 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_35 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_36 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_37 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_38 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_39 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_40 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_41 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_42 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_43 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_44 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_45 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_46 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_47 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_48 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_49 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_50 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_51 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_52 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_53 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_54 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_55 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_56 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_57 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_58 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_59 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_60 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_61 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_62 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_63 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_64 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_65 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_66 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_67 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_68 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_69 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_70 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_71 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_72 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_73 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_74 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_75 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_76 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_77 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_78 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_79 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_80 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_81 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_82 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_83 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_84 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_85 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_86 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_87 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_88 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_89 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_90 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_91 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_92 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_93 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_94 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_95 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_96 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_97 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_98 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_99 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_100 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_101 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_102 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_103 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_104 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_105 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_106 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_107 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_108 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_109 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_110 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_111 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_112 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_113 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_114 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_115 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_116 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_117 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_118 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_119 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_120 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_121 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_122 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_123 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_124 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_125 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_126 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_127 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_128 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_129 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_130 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_131 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_132 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_133 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_134 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_135 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_136 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_137 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_138 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_139 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_140 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_141 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_142 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_143 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_144 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_145 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_146 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_147 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_148 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_149 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_150 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_151 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_152 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_153 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_154 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_155 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_156 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_157 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_158 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_159 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_160 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_161 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_162 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_163 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_164 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_165 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_166 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_167 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_168 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_169 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_170 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_171 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_172 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_173 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_174 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_175 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_176 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_177 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_178 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_179 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_180 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_181 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_182 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_183 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_184 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_185 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_186 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_187 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_188 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_189 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_190 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_191 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_192 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_193 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_194 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_195 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_196 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_197 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_198 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_199 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_200 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_201 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_202 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_203 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_204 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_205 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_206 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_207 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_208 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_209 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_210 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_211 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_212 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_213 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_214 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_215 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_216 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_217 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_218 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_219 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_220 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_221 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_222 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_223 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_224 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_225 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_226 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_227 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_228 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_229 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_230 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_231 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_232 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_233 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_234 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_235 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_236 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_237 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_238 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_239 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_240 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_241 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_242 = 1'h0; // @[ToAXI4.scala:115:67]
wire _idStall_WIRE_243 = 1'h0; // @[ToAXI4.scala:115:67]
wire out_arw_bits_lock = 1'h0; // @[ToAXI4.scala:153:25]
wire r_d_sink = 1'h0; // @[Edges.scala:810:17]
wire b_d_sink = 1'h0; // @[Edges.scala:792:17]
wire b_d_corrupt = 1'h0; // @[Edges.scala:792:17]
wire _nodeIn_d_bits_T_sink = 1'h0; // @[ToAXI4.scala:255:23]
wire _idStall_0_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_1_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_2_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_3_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_4_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_5_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_6_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_7_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_8_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_9_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_10_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_11_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_12_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_13_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_14_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_15_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_16_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_17_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_18_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_19_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_20_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_21_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_22_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_23_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_24_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_25_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_26_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_27_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_28_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_29_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_30_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_31_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_32_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_33_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_34_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_35_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_36_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_37_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_38_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_39_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_40_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_41_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_42_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_43_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_44_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_45_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_46_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_47_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_48_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_49_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_50_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_51_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_52_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_53_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_54_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_55_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_56_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_57_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_58_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_59_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_60_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_61_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_62_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_63_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_64_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_65_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_66_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_67_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_68_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_69_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_70_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_71_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_72_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_73_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_74_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_75_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_76_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_77_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_78_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_79_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_80_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_81_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_82_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_83_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_84_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_85_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_86_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_87_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_88_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_89_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_90_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_91_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_92_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_93_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_94_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_95_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_96_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_97_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_98_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_99_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_100_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_101_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_102_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_103_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_104_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_105_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_106_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_107_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_108_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_109_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_110_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_111_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_112_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_113_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_114_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_115_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_116_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_117_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_118_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_119_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_120_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_121_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_122_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_123_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_124_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_125_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_126_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_127_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_128_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_129_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_130_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_131_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_132_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_133_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_134_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_135_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_136_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_137_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_138_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_139_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_140_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_141_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_142_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_143_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_144_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_145_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_146_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_147_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_148_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_149_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_150_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_151_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_152_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_153_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_154_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_155_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_156_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_157_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_158_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_159_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_160_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_161_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_162_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_163_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_164_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_165_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_166_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_167_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_168_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_169_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_170_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_171_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_172_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_173_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_174_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_175_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_176_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_177_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_178_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_179_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_180_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_181_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_182_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_183_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_184_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_185_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_186_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_187_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_188_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_189_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_190_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_191_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_192_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_193_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_194_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_195_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_196_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_197_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_198_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_199_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_200_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_201_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_202_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_203_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_204_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_205_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_206_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_207_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_208_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_209_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_210_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_211_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_212_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_213_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_214_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_215_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_216_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_217_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_218_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_219_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_220_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_221_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_222_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_223_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_224_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_225_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_226_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_227_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_228_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_229_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_230_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_231_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_232_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_233_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_234_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_235_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_236_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_237_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_238_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_239_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_240_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_241_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_242_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire _idStall_243_T_1 = 1'h0; // @[ToAXI4.scala:286:21]
wire [63:0] r_d_data = 64'h0; // @[Edges.scala:810:17]
wire [63:0] b_d_data = 64'h0; // @[Edges.scala:792:17]
wire [63:0] _nodeIn_d_bits_T_data = 64'h0; // @[ToAXI4.scala:255:23]
wire [2:0] b_d_opcode = 3'h0; // @[Edges.scala:792:17]
wire [2:0] out_arw_bits_prot = 3'h1; // @[ToAXI4.scala:153:25]
wire [2:0] r_d_opcode = 3'h1; // @[Edges.scala:810:17]
wire [3:0] out_arw_bits_cache = 4'h0; // @[ToAXI4.scala:153:25]
wire [3:0] out_arw_bits_qos = 4'h0; // @[ToAXI4.scala:153:25]
wire [1:0] out_arw_bits_burst = 2'h1; // @[ToAXI4.scala:153:25]
wire [7:0] sourceTable_243 = 8'hF3; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_242 = 8'hF2; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_241 = 8'hF1; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_240 = 8'hF0; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_239 = 8'hEF; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_238 = 8'hEE; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_237 = 8'hED; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_236 = 8'hEC; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_235 = 8'hEB; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_234 = 8'hEA; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_233 = 8'hE9; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_232 = 8'hE8; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_231 = 8'hE7; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_230 = 8'hE6; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_229 = 8'hE5; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_228 = 8'hE4; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_227 = 8'hE3; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_226 = 8'hE2; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_225 = 8'hE1; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_224 = 8'hE0; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_223 = 8'hDF; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_222 = 8'hDE; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_221 = 8'hDD; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_220 = 8'hDC; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_219 = 8'hDB; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_218 = 8'hDA; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_217 = 8'hD9; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_216 = 8'hD8; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_215 = 8'hD7; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_214 = 8'hD6; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_213 = 8'hD5; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_212 = 8'hD4; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_211 = 8'hD3; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_210 = 8'hD2; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_209 = 8'hD1; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_208 = 8'hD0; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_207 = 8'hCF; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_206 = 8'hCE; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_205 = 8'hCD; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_204 = 8'hCC; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_203 = 8'hCB; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_202 = 8'hCA; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_201 = 8'hC9; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_200 = 8'hC8; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_199 = 8'hC7; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_198 = 8'hC6; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_197 = 8'hC5; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_196 = 8'hC4; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_195 = 8'hC3; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_194 = 8'hC2; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_193 = 8'hC1; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_192 = 8'hC0; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_191 = 8'hBF; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_190 = 8'hBE; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_189 = 8'hBD; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_188 = 8'hBC; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_187 = 8'hBB; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_186 = 8'hBA; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_185 = 8'hB9; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_184 = 8'hB8; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_183 = 8'hB7; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_182 = 8'hB6; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_181 = 8'hB5; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_180 = 8'hB4; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_179 = 8'hB3; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_178 = 8'hB2; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_177 = 8'hB1; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_176 = 8'hB0; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_175 = 8'hAF; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_174 = 8'hAE; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_173 = 8'hAD; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_172 = 8'hAC; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_171 = 8'hAB; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_170 = 8'hAA; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_169 = 8'hA9; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_168 = 8'hA8; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_167 = 8'hA7; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_166 = 8'hA6; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_165 = 8'hA5; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_164 = 8'hA4; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_163 = 8'hA3; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_162 = 8'hA2; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_161 = 8'hA1; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_160 = 8'hA0; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_159 = 8'h9F; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_158 = 8'h9E; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_157 = 8'h9D; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_156 = 8'h9C; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_155 = 8'h9B; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_154 = 8'h9A; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_153 = 8'h99; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_152 = 8'h98; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_151 = 8'h97; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_150 = 8'h96; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_149 = 8'h95; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_148 = 8'h94; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_147 = 8'h93; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_146 = 8'h92; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_145 = 8'h91; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_144 = 8'h90; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_143 = 8'h8F; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_142 = 8'h8E; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_141 = 8'h8D; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_140 = 8'h8C; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_139 = 8'h8B; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_138 = 8'h8A; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_137 = 8'h89; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_136 = 8'h88; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_135 = 8'h87; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_134 = 8'h86; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_133 = 8'h85; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_132 = 8'h84; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_131 = 8'h83; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_130 = 8'h82; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_129 = 8'h81; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_128 = 8'h80; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_127 = 8'h7F; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_126 = 8'h7E; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_125 = 8'h7D; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_124 = 8'h7C; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_123 = 8'h7B; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_122 = 8'h7A; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_121 = 8'h79; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_120 = 8'h78; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_119 = 8'h77; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_118 = 8'h76; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_117 = 8'h75; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_116 = 8'h74; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_115 = 8'h73; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_114 = 8'h72; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_113 = 8'h71; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_112 = 8'h70; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_111 = 8'h6F; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_110 = 8'h6E; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_109 = 8'h6D; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_108 = 8'h6C; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_107 = 8'h6B; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_106 = 8'h6A; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_105 = 8'h69; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_104 = 8'h68; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_103 = 8'h67; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_102 = 8'h66; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_101 = 8'h65; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_100 = 8'h64; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_99 = 8'h63; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_98 = 8'h62; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_97 = 8'h61; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_96 = 8'h60; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_95 = 8'h5F; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_94 = 8'h5E; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_93 = 8'h5D; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_92 = 8'h5C; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_91 = 8'h5B; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_90 = 8'h5A; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_89 = 8'h59; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_88 = 8'h58; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_87 = 8'h57; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_86 = 8'h56; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_85 = 8'h55; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_84 = 8'h54; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_83 = 8'h53; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_82 = 8'h52; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_81 = 8'h51; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_80 = 8'h50; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_79 = 8'h4F; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_78 = 8'h4E; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_77 = 8'h4D; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_76 = 8'h4C; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_75 = 8'h4B; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_74 = 8'h4A; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_73 = 8'h49; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_72 = 8'h48; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_71 = 8'h47; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_70 = 8'h46; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_69 = 8'h45; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_68 = 8'h44; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_67 = 8'h43; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_66 = 8'h42; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_65 = 8'h41; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_64 = 8'h40; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_63 = 8'h3F; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_62 = 8'h3E; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_61 = 8'h3D; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_60 = 8'h3C; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_59 = 8'h3B; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_58 = 8'h3A; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_57 = 8'h39; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_56 = 8'h38; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_55 = 8'h37; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_54 = 8'h36; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_53 = 8'h35; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_52 = 8'h34; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_51 = 8'h33; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_50 = 8'h32; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_49 = 8'h31; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_48 = 8'h30; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_47 = 8'h2F; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_46 = 8'h2E; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_45 = 8'h2D; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_44 = 8'h2C; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_43 = 8'h2B; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_42 = 8'h2A; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_41 = 8'h29; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_40 = 8'h28; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_39 = 8'h27; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_38 = 8'h26; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_37 = 8'h25; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_36 = 8'h24; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_35 = 8'h23; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_34 = 8'h22; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_33 = 8'h21; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_32 = 8'h20; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_31 = 8'h1F; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_30 = 8'h1E; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_29 = 8'h1D; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_28 = 8'h1C; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_27 = 8'h1B; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_26 = 8'h1A; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_25 = 8'h19; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_24 = 8'h18; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_23 = 8'h17; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_22 = 8'h16; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_21 = 8'h15; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_20 = 8'h14; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_19 = 8'h13; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_18 = 8'h12; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_17 = 8'h11; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_16 = 8'h10; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_15 = 8'hF; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_14 = 8'hE; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_13 = 8'hD; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_12 = 8'hC; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_11 = 8'hB; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_10 = 8'hA; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_9 = 8'h9; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_8 = 8'h8; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_7 = 8'h7; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_6 = 8'h6; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_5 = 8'h5; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_4 = 8'h4; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_3 = 8'h3; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_2 = 8'h2; // @[ToAXI4.scala:114:36]
wire [7:0] sourceTable_1 = 8'h1; // @[ToAXI4.scala:114:36]
wire [7:0] _sourceTable_WIRE = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_1 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_2 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_3 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_4 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_5 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_6 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_7 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_8 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_9 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_10 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_11 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_12 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_13 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_14 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_15 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_16 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_17 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_18 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_19 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_20 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_21 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_22 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_23 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_24 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_25 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_26 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_27 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_28 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_29 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_30 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_31 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_32 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_33 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_34 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_35 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_36 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_37 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_38 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_39 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_40 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_41 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_42 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_43 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_44 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_45 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_46 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_47 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_48 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_49 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_50 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_51 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_52 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_53 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_54 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_55 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_56 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_57 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_58 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_59 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_60 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_61 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_62 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_63 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_64 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_65 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_66 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_67 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_68 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_69 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_70 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_71 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_72 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_73 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_74 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_75 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_76 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_77 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_78 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_79 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_80 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_81 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_82 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_83 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_84 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_85 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_86 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_87 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_88 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_89 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_90 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_91 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_92 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_93 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_94 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_95 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_96 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_97 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_98 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_99 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_100 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_101 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_102 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_103 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_104 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_105 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_106 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_107 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_108 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_109 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_110 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_111 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_112 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_113 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_114 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_115 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_116 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_117 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_118 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_119 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_120 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_121 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_122 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_123 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_124 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_125 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_126 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_127 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_128 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_129 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_130 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_131 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_132 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_133 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_134 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_135 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_136 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_137 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_138 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_139 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_140 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_141 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_142 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_143 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_144 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_145 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_146 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_147 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_148 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_149 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_150 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_151 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_152 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_153 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_154 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_155 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_156 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_157 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_158 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_159 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_160 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_161 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_162 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_163 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_164 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_165 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_166 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_167 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_168 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_169 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_170 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_171 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_172 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_173 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_174 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_175 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_176 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_177 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_178 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_179 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_180 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_181 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_182 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_183 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_184 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_185 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_186 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_187 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_188 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_189 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_190 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_191 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_192 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_193 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_194 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_195 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_196 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_197 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_198 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_199 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_200 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_201 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_202 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_203 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_204 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_205 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_206 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_207 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_208 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_209 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_210 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_211 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_212 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_213 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_214 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_215 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_216 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_217 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_218 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_219 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_220 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_221 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_222 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_223 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_224 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_225 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_226 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_227 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_228 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_229 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_230 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_231 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_232 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_233 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_234 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_235 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_236 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_237 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_238 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_239 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_240 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_241 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_242 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_243 = 8'h0; // @[ToAXI4.scala:114:89]
wire [7:0] _sourceTable_WIRE_244_0 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_1 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_2 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_3 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_4 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_5 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_6 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_7 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_8 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_9 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_10 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_11 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_12 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_13 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_14 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_15 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_16 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_17 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_18 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_19 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_20 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_21 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_22 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_23 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_24 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_25 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_26 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_27 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_28 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_29 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_30 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_31 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_32 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_33 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_34 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_35 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_36 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_37 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_38 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_39 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_40 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_41 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_42 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_43 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_44 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_45 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_46 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_47 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_48 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_49 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_50 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_51 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_52 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_53 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_54 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_55 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_56 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_57 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_58 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_59 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_60 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_61 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_62 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_63 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_64 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_65 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_66 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_67 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_68 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_69 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_70 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_71 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_72 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_73 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_74 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_75 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_76 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_77 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_78 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_79 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_80 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_81 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_82 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_83 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_84 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_85 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_86 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_87 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_88 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_89 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_90 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_91 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_92 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_93 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_94 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_95 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_96 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_97 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_98 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_99 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_100 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_101 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_102 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_103 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_104 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_105 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_106 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_107 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_108 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_109 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_110 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_111 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_112 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_113 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_114 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_115 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_116 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_117 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_118 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_119 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_120 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_121 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_122 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_123 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_124 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_125 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_126 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_127 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_128 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_129 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_130 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_131 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_132 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_133 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_134 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_135 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_136 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_137 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_138 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_139 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_140 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_141 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_142 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_143 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_144 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_145 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_146 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_147 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_148 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_149 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_150 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_151 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_152 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_153 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_154 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_155 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_156 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_157 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_158 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_159 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_160 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_161 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_162 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_163 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_164 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_165 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_166 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_167 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_168 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_169 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_170 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_171 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_172 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_173 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_174 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_175 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_176 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_177 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_178 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_179 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_180 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_181 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_182 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_183 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_184 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_185 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_186 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_187 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_188 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_189 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_190 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_191 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_192 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_193 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_194 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_195 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_196 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_197 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_198 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_199 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_200 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_201 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_202 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_203 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_204 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_205 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_206 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_207 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_208 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_209 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_210 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_211 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_212 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_213 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_214 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_215 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_216 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_217 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_218 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_219 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_220 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_221 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_222 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_223 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_224 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_225 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_226 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_227 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_228 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_229 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_230 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_231 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_232 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_233 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_234 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_235 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_236 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_237 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_238 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_239 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_240 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_241 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_242 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] _sourceTable_WIRE_244_243 = 8'h0; // @[ToAXI4.scala:114:76]
wire [7:0] sourceTable_0 = 8'h0; // @[ToAXI4.scala:114:36]
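// The sourceTable_* constants above appear to be the constant-folded
// TileLink-source -> AXI4-ID lookup table built at ToAXI4.scala:114; with one
// AXI ID per TL source it reduces to the identity mapping (sourceTable_N == N),
// and the _sourceTable_WIRE* wires are its zeroed default initializers.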
wire nodeIn_a_ready; // @[MixedNode.scala:551:17]
wire nodeIn_a_valid = auto_in_a_valid_0; // @[ToAXI4.scala:103:9]
wire [2:0] nodeIn_a_bits_opcode = auto_in_a_bits_opcode_0; // @[ToAXI4.scala:103:9]
wire [2:0] nodeIn_a_bits_param = auto_in_a_bits_param_0; // @[ToAXI4.scala:103:9]
wire [2:0] nodeIn_a_bits_size = auto_in_a_bits_size_0; // @[ToAXI4.scala:103:9]
wire [7:0] nodeIn_a_bits_source = auto_in_a_bits_source_0; // @[ToAXI4.scala:103:9]
wire [31:0] nodeIn_a_bits_address = auto_in_a_bits_address_0; // @[ToAXI4.scala:103:9]
wire [7:0] nodeIn_a_bits_mask = auto_in_a_bits_mask_0; // @[ToAXI4.scala:103:9]
wire [63:0] nodeIn_a_bits_data = auto_in_a_bits_data_0; // @[ToAXI4.scala:103:9]
wire nodeIn_a_bits_corrupt = auto_in_a_bits_corrupt_0; // @[ToAXI4.scala:103:9]
wire nodeIn_d_ready = auto_in_d_ready_0; // @[ToAXI4.scala:103:9]
wire nodeIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [7:0] nodeIn_d_bits_source; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] nodeIn_d_bits_data; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire nodeOut_aw_ready = auto_out_aw_ready_0; // @[ToAXI4.scala:103:9]
wire nodeOut_aw_valid; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_aw_bits_id; // @[MixedNode.scala:542:17]
wire [31:0] nodeOut_aw_bits_addr; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_aw_bits_len; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_aw_bits_size; // @[MixedNode.scala:542:17]
wire [1:0] nodeOut_aw_bits_burst; // @[MixedNode.scala:542:17]
wire nodeOut_aw_bits_lock; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_aw_bits_cache; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_aw_bits_prot; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_aw_bits_qos; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_aw_bits_echo_tl_state_size; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_aw_bits_echo_tl_state_source; // @[MixedNode.scala:542:17]
wire nodeOut_w_ready = auto_out_w_ready_0; // @[ToAXI4.scala:103:9]
wire nodeOut_w_valid; // @[MixedNode.scala:542:17]
wire [63:0] nodeOut_w_bits_data; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_w_bits_strb; // @[MixedNode.scala:542:17]
wire nodeOut_w_bits_last; // @[MixedNode.scala:542:17]
wire nodeOut_b_ready; // @[MixedNode.scala:542:17]
wire nodeOut_b_valid = auto_out_b_valid_0; // @[ToAXI4.scala:103:9]
wire [7:0] nodeOut_b_bits_id = auto_out_b_bits_id_0; // @[ToAXI4.scala:103:9]
wire [1:0] nodeOut_b_bits_resp = auto_out_b_bits_resp_0; // @[ToAXI4.scala:103:9]
wire [3:0] nodeOut_b_bits_echo_tl_state_size = auto_out_b_bits_echo_tl_state_size_0; // @[ToAXI4.scala:103:9]
wire [7:0] nodeOut_b_bits_echo_tl_state_source = auto_out_b_bits_echo_tl_state_source_0; // @[ToAXI4.scala:103:9]
wire nodeOut_ar_ready = auto_out_ar_ready_0; // @[ToAXI4.scala:103:9]
wire nodeOut_ar_valid; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_ar_bits_id; // @[MixedNode.scala:542:17]
wire [31:0] nodeOut_ar_bits_addr; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_ar_bits_len; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_ar_bits_size; // @[MixedNode.scala:542:17]
wire [1:0] nodeOut_ar_bits_burst; // @[MixedNode.scala:542:17]
wire nodeOut_ar_bits_lock; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_ar_bits_cache; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_ar_bits_prot; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_ar_bits_qos; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_ar_bits_echo_tl_state_size; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_ar_bits_echo_tl_state_source; // @[MixedNode.scala:542:17]
wire nodeOut_r_ready; // @[MixedNode.scala:542:17]
wire nodeOut_r_valid = auto_out_r_valid_0; // @[ToAXI4.scala:103:9]
wire [7:0] nodeOut_r_bits_id = auto_out_r_bits_id_0; // @[ToAXI4.scala:103:9]
wire [63:0] nodeOut_r_bits_data = auto_out_r_bits_data_0; // @[ToAXI4.scala:103:9]
wire [1:0] nodeOut_r_bits_resp = auto_out_r_bits_resp_0; // @[ToAXI4.scala:103:9]
wire [3:0] nodeOut_r_bits_echo_tl_state_size = auto_out_r_bits_echo_tl_state_size_0; // @[ToAXI4.scala:103:9]
wire [7:0] nodeOut_r_bits_echo_tl_state_source = auto_out_r_bits_echo_tl_state_source_0; // @[ToAXI4.scala:103:9]
wire nodeOut_r_bits_last = auto_out_r_bits_last_0; // @[ToAXI4.scala:103:9]
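// The nodeIn_* wires above are the TileLink slave-side A/D channel signals and
// the nodeOut_* wires are the AXI4 master-side AW/W/B/AR/R channel signals of
// the converter, aliased to and from the module's auto_* ports. The
// echo_tl_state_{size,source} fields appear to carry the original TL size and
// source ID through AXI4 so B/R responses can be turned back into D beats.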
wire auto_in_a_ready_0; // @[ToAXI4.scala:103:9]
wire [2:0] auto_in_d_bits_opcode_0; // @[ToAXI4.scala:103:9]
wire [2:0] auto_in_d_bits_size_0; // @[ToAXI4.scala:103:9]
wire [7:0] auto_in_d_bits_source_0; // @[ToAXI4.scala:103:9]
wire auto_in_d_bits_denied_0; // @[ToAXI4.scala:103:9]
wire [63:0] auto_in_d_bits_data_0; // @[ToAXI4.scala:103:9]
wire auto_in_d_bits_corrupt_0; // @[ToAXI4.scala:103:9]
wire auto_in_d_valid_0; // @[ToAXI4.scala:103:9]
wire [3:0] auto_out_aw_bits_echo_tl_state_size_0; // @[ToAXI4.scala:103:9]
wire [7:0] auto_out_aw_bits_echo_tl_state_source_0; // @[ToAXI4.scala:103:9]
wire [7:0] auto_out_aw_bits_id_0; // @[ToAXI4.scala:103:9]
wire [31:0] auto_out_aw_bits_addr_0; // @[ToAXI4.scala:103:9]
wire [7:0] auto_out_aw_bits_len_0; // @[ToAXI4.scala:103:9]
wire [2:0] auto_out_aw_bits_size_0; // @[ToAXI4.scala:103:9]
wire [1:0] auto_out_aw_bits_burst_0; // @[ToAXI4.scala:103:9]
wire auto_out_aw_bits_lock_0; // @[ToAXI4.scala:103:9]
wire [3:0] auto_out_aw_bits_cache_0; // @[ToAXI4.scala:103:9]
wire [2:0] auto_out_aw_bits_prot_0; // @[ToAXI4.scala:103:9]
wire [3:0] auto_out_aw_bits_qos_0; // @[ToAXI4.scala:103:9]
wire auto_out_aw_valid_0; // @[ToAXI4.scala:103:9]
wire [63:0] auto_out_w_bits_data_0; // @[ToAXI4.scala:103:9]
wire [7:0] auto_out_w_bits_strb_0; // @[ToAXI4.scala:103:9]
wire auto_out_w_bits_last_0; // @[ToAXI4.scala:103:9]
wire auto_out_w_valid_0; // @[ToAXI4.scala:103:9]
wire auto_out_b_ready_0; // @[ToAXI4.scala:103:9]
wire [3:0] auto_out_ar_bits_echo_tl_state_size_0; // @[ToAXI4.scala:103:9]
wire [7:0] auto_out_ar_bits_echo_tl_state_source_0; // @[ToAXI4.scala:103:9]
wire [7:0] auto_out_ar_bits_id_0; // @[ToAXI4.scala:103:9]
wire [31:0] auto_out_ar_bits_addr_0; // @[ToAXI4.scala:103:9]
wire [7:0] auto_out_ar_bits_len_0; // @[ToAXI4.scala:103:9]
wire [2:0] auto_out_ar_bits_size_0; // @[ToAXI4.scala:103:9]
wire [1:0] auto_out_ar_bits_burst_0; // @[ToAXI4.scala:103:9]
wire auto_out_ar_bits_lock_0; // @[ToAXI4.scala:103:9]
wire [3:0] auto_out_ar_bits_cache_0; // @[ToAXI4.scala:103:9]
wire [2:0] auto_out_ar_bits_prot_0; // @[ToAXI4.scala:103:9]
wire [3:0] auto_out_ar_bits_qos_0; // @[ToAXI4.scala:103:9]
wire auto_out_ar_valid_0; // @[ToAXI4.scala:103:9]
wire auto_out_r_ready_0; // @[ToAXI4.scala:103:9]
wire _nodeIn_a_ready_T_4; // @[ToAXI4.scala:206:28]
assign auto_in_a_ready_0 = nodeIn_a_ready; // @[ToAXI4.scala:103:9]
wire [7:0] out_arw_bits_echo_tl_state_source = nodeIn_a_bits_source; // @[ToAXI4.scala:153:25]
wire [31:0] out_arw_bits_addr = nodeIn_a_bits_address; // @[ToAXI4.scala:153:25]
wire [7:0] out_w_bits_strb = nodeIn_a_bits_mask; // @[ToAXI4.scala:154:23]
wire [63:0] out_w_bits_data = nodeIn_a_bits_data; // @[ToAXI4.scala:154:23]
wire _nodeIn_d_valid_T; // @[ToAXI4.scala:229:24]
assign auto_in_d_valid_0 = nodeIn_d_valid; // @[ToAXI4.scala:103:9]
wire [2:0] _nodeIn_d_bits_T_opcode; // @[ToAXI4.scala:255:23]
assign auto_in_d_bits_opcode_0 = nodeIn_d_bits_opcode; // @[ToAXI4.scala:103:9]
wire [2:0] _nodeIn_d_bits_T_size; // @[ToAXI4.scala:255:23]
assign auto_in_d_bits_size_0 = nodeIn_d_bits_size; // @[ToAXI4.scala:103:9]
wire [7:0] _nodeIn_d_bits_T_source; // @[ToAXI4.scala:255:23]
assign auto_in_d_bits_source_0 = nodeIn_d_bits_source; // @[ToAXI4.scala:103:9]
wire _nodeIn_d_bits_T_denied; // @[ToAXI4.scala:255:23]
assign auto_in_d_bits_denied_0 = nodeIn_d_bits_denied; // @[ToAXI4.scala:103:9]
assign auto_in_d_bits_data_0 = nodeIn_d_bits_data; // @[ToAXI4.scala:103:9]
wire _nodeIn_d_bits_T_corrupt; // @[ToAXI4.scala:255:23]
assign auto_in_d_bits_corrupt_0 = nodeIn_d_bits_corrupt; // @[ToAXI4.scala:103:9]
wire _nodeOut_aw_valid_T; // @[ToAXI4.scala:162:39]
assign auto_out_aw_valid_0 = nodeOut_aw_valid; // @[ToAXI4.scala:103:9]
wire [7:0] queue_arw_bits_id; // @[Decoupled.scala:401:19]
assign auto_out_aw_bits_id_0 = nodeOut_aw_bits_id; // @[ToAXI4.scala:103:9]
wire [31:0] queue_arw_bits_addr; // @[Decoupled.scala:401:19]
assign auto_out_aw_bits_addr_0 = nodeOut_aw_bits_addr; // @[ToAXI4.scala:103:9]
wire [7:0] queue_arw_bits_len; // @[Decoupled.scala:401:19]
assign auto_out_aw_bits_len_0 = nodeOut_aw_bits_len; // @[ToAXI4.scala:103:9]
wire [2:0] queue_arw_bits_size; // @[Decoupled.scala:401:19]
assign auto_out_aw_bits_size_0 = nodeOut_aw_bits_size; // @[ToAXI4.scala:103:9]
wire [1:0] queue_arw_bits_burst; // @[Decoupled.scala:401:19]
assign auto_out_aw_bits_burst_0 = nodeOut_aw_bits_burst; // @[ToAXI4.scala:103:9]
wire queue_arw_bits_lock; // @[Decoupled.scala:401:19]
assign auto_out_aw_bits_lock_0 = nodeOut_aw_bits_lock; // @[ToAXI4.scala:103:9]
wire [3:0] queue_arw_bits_cache; // @[Decoupled.scala:401:19]
assign auto_out_aw_bits_cache_0 = nodeOut_aw_bits_cache; // @[ToAXI4.scala:103:9]
wire [2:0] queue_arw_bits_prot; // @[Decoupled.scala:401:19]
assign auto_out_aw_bits_prot_0 = nodeOut_aw_bits_prot; // @[ToAXI4.scala:103:9]
wire [3:0] queue_arw_bits_qos; // @[Decoupled.scala:401:19]
assign auto_out_aw_bits_qos_0 = nodeOut_aw_bits_qos; // @[ToAXI4.scala:103:9]
wire [3:0] queue_arw_bits_echo_tl_state_size; // @[Decoupled.scala:401:19]
assign auto_out_aw_bits_echo_tl_state_size_0 = nodeOut_aw_bits_echo_tl_state_size; // @[ToAXI4.scala:103:9]
wire [7:0] queue_arw_bits_echo_tl_state_source; // @[Decoupled.scala:401:19]
assign auto_out_aw_bits_echo_tl_state_source_0 = nodeOut_aw_bits_echo_tl_state_source; // @[ToAXI4.scala:103:9]
wire nodeOut_w_irr_ready = nodeOut_w_ready; // @[Decoupled.scala:401:19]
wire nodeOut_w_irr_valid; // @[Decoupled.scala:401:19]
assign auto_out_w_valid_0 = nodeOut_w_valid; // @[ToAXI4.scala:103:9]
wire [63:0] nodeOut_w_irr_bits_data; // @[Decoupled.scala:401:19]
assign auto_out_w_bits_data_0 = nodeOut_w_bits_data; // @[ToAXI4.scala:103:9]
wire [7:0] nodeOut_w_irr_bits_strb; // @[Decoupled.scala:401:19]
assign auto_out_w_bits_strb_0 = nodeOut_w_bits_strb; // @[ToAXI4.scala:103:9]
wire nodeOut_w_irr_bits_last; // @[Decoupled.scala:401:19]
assign auto_out_w_bits_last_0 = nodeOut_w_bits_last; // @[ToAXI4.scala:103:9]
wire _nodeOut_b_ready_T_1; // @[ToAXI4.scala:228:33]
assign auto_out_b_ready_0 = nodeOut_b_ready; // @[ToAXI4.scala:103:9]
wire [7:0] b_d_source = nodeOut_b_bits_echo_tl_state_source; // @[Edges.scala:792:17]
wire _nodeOut_ar_valid_T_1; // @[ToAXI4.scala:161:39]
assign auto_out_ar_valid_0 = nodeOut_ar_valid; // @[ToAXI4.scala:103:9]
assign auto_out_ar_bits_id_0 = nodeOut_ar_bits_id; // @[ToAXI4.scala:103:9]
assign auto_out_ar_bits_addr_0 = nodeOut_ar_bits_addr; // @[ToAXI4.scala:103:9]
assign auto_out_ar_bits_len_0 = nodeOut_ar_bits_len; // @[ToAXI4.scala:103:9]
assign auto_out_ar_bits_size_0 = nodeOut_ar_bits_size; // @[ToAXI4.scala:103:9]
assign auto_out_ar_bits_burst_0 = nodeOut_ar_bits_burst; // @[ToAXI4.scala:103:9]
assign auto_out_ar_bits_lock_0 = nodeOut_ar_bits_lock; // @[ToAXI4.scala:103:9]
assign auto_out_ar_bits_cache_0 = nodeOut_ar_bits_cache; // @[ToAXI4.scala:103:9]
assign auto_out_ar_bits_prot_0 = nodeOut_ar_bits_prot; // @[ToAXI4.scala:103:9]
assign auto_out_ar_bits_qos_0 = nodeOut_ar_bits_qos; // @[ToAXI4.scala:103:9]
assign auto_out_ar_bits_echo_tl_state_size_0 = nodeOut_ar_bits_echo_tl_state_size; // @[ToAXI4.scala:103:9]
assign auto_out_ar_bits_echo_tl_state_source_0 = nodeOut_ar_bits_echo_tl_state_source; // @[ToAXI4.scala:103:9]
wire _nodeOut_r_ready_T; // @[ToAXI4.scala:227:33]
assign auto_out_r_ready_0 = nodeOut_r_ready; // @[ToAXI4.scala:103:9]
assign nodeIn_d_bits_data = nodeOut_r_bits_data; // @[MixedNode.scala:542:17, :551:17]
wire [7:0] r_d_source = nodeOut_r_bits_echo_tl_state_source; // @[Edges.scala:810:17]
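// The assigns above fan the internal node wires out to the auto_* ports;
// b_d_source and r_d_source recover the original TileLink source ID from the
// echoed tl_state bundle on the B and R channels (Edges.scala). The idStall_*
// wires that follow appear to be the per-source-ID stall flags from
// ToAXI4.scala:115, used to hold off new A-channel requests on a busy ID.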
wire idStall_0; // @[ToAXI4.scala:115:32]
wire idStall_1; // @[ToAXI4.scala:115:32]
wire idStall_2; // @[ToAXI4.scala:115:32]
wire idStall_3; // @[ToAXI4.scala:115:32]
wire idStall_4; // @[ToAXI4.scala:115:32]
wire idStall_5; // @[ToAXI4.scala:115:32]
wire idStall_6; // @[ToAXI4.scala:115:32]
wire idStall_7; // @[ToAXI4.scala:115:32]
wire idStall_8; // @[ToAXI4.scala:115:32]
wire idStall_9; // @[ToAXI4.scala:115:32]
wire idStall_10; // @[ToAXI4.scala:115:32]
wire idStall_11; // @[ToAXI4.scala:115:32]
wire idStall_12; // @[ToAXI4.scala:115:32]
wire idStall_13; // @[ToAXI4.scala:115:32]
wire idStall_14; // @[ToAXI4.scala:115:32]
wire idStall_15; // @[ToAXI4.scala:115:32]
wire idStall_16; // @[ToAXI4.scala:115:32]
wire idStall_17; // @[ToAXI4.scala:115:32]
wire idStall_18; // @[ToAXI4.scala:115:32]
wire idStall_19; // @[ToAXI4.scala:115:32]
wire idStall_20; // @[ToAXI4.scala:115:32]
wire idStall_21; // @[ToAXI4.scala:115:32]
wire idStall_22; // @[ToAXI4.scala:115:32]
wire idStall_23; // @[ToAXI4.scala:115:32]
wire idStall_24; // @[ToAXI4.scala:115:32]
wire idStall_25; // @[ToAXI4.scala:115:32]
wire idStall_26; // @[ToAXI4.scala:115:32]
wire idStall_27; // @[ToAXI4.scala:115:32]
wire idStall_28; // @[ToAXI4.scala:115:32]
wire idStall_29; // @[ToAXI4.scala:115:32]
wire idStall_30; // @[ToAXI4.scala:115:32]
wire idStall_31; // @[ToAXI4.scala:115:32]
wire idStall_32; // @[ToAXI4.scala:115:32]
wire idStall_33; // @[ToAXI4.scala:115:32]
wire idStall_34; // @[ToAXI4.scala:115:32]
wire idStall_35; // @[ToAXI4.scala:115:32]
wire idStall_36; // @[ToAXI4.scala:115:32]
wire idStall_37; // @[ToAXI4.scala:115:32]
wire idStall_38; // @[ToAXI4.scala:115:32]
wire idStall_39; // @[ToAXI4.scala:115:32]
wire idStall_40; // @[ToAXI4.scala:115:32]
wire idStall_41; // @[ToAXI4.scala:115:32]
wire idStall_42; // @[ToAXI4.scala:115:32]
wire idStall_43; // @[ToAXI4.scala:115:32]
wire idStall_44; // @[ToAXI4.scala:115:32]
wire idStall_45; // @[ToAXI4.scala:115:32]
wire idStall_46; // @[ToAXI4.scala:115:32]
wire idStall_47; // @[ToAXI4.scala:115:32]
wire idStall_48; // @[ToAXI4.scala:115:32]
wire idStall_49; // @[ToAXI4.scala:115:32]
wire idStall_50; // @[ToAXI4.scala:115:32]
wire idStall_51; // @[ToAXI4.scala:115:32]
wire idStall_52; // @[ToAXI4.scala:115:32]
wire idStall_53; // @[ToAXI4.scala:115:32]
wire idStall_54; // @[ToAXI4.scala:115:32]
wire idStall_55; // @[ToAXI4.scala:115:32]
wire idStall_56; // @[ToAXI4.scala:115:32]
wire idStall_57; // @[ToAXI4.scala:115:32]
wire idStall_58; // @[ToAXI4.scala:115:32]
wire idStall_59; // @[ToAXI4.scala:115:32]
wire idStall_60; // @[ToAXI4.scala:115:32]
wire idStall_61; // @[ToAXI4.scala:115:32]
wire idStall_62; // @[ToAXI4.scala:115:32]
wire idStall_63; // @[ToAXI4.scala:115:32]
wire idStall_64; // @[ToAXI4.scala:115:32]
wire idStall_65; // @[ToAXI4.scala:115:32]
wire idStall_66; // @[ToAXI4.scala:115:32]
wire idStall_67; // @[ToAXI4.scala:115:32]
wire idStall_68; // @[ToAXI4.scala:115:32]
wire idStall_69; // @[ToAXI4.scala:115:32]
wire idStall_70; // @[ToAXI4.scala:115:32]
wire idStall_71; // @[ToAXI4.scala:115:32]
wire idStall_72; // @[ToAXI4.scala:115:32]
wire idStall_73; // @[ToAXI4.scala:115:32]
wire idStall_74; // @[ToAXI4.scala:115:32]
wire idStall_75; // @[ToAXI4.scala:115:32]
wire idStall_76; // @[ToAXI4.scala:115:32]
wire idStall_77; // @[ToAXI4.scala:115:32]
wire idStall_78; // @[ToAXI4.scala:115:32]
wire idStall_79; // @[ToAXI4.scala:115:32]
wire idStall_80; // @[ToAXI4.scala:115:32]
wire idStall_81; // @[ToAXI4.scala:115:32]
wire idStall_82; // @[ToAXI4.scala:115:32]
wire idStall_83; // @[ToAXI4.scala:115:32]
wire idStall_84; // @[ToAXI4.scala:115:32]
wire idStall_85; // @[ToAXI4.scala:115:32]
wire idStall_86; // @[ToAXI4.scala:115:32]
wire idStall_87; // @[ToAXI4.scala:115:32]
wire idStall_88; // @[ToAXI4.scala:115:32]
wire idStall_89; // @[ToAXI4.scala:115:32]
wire idStall_90; // @[ToAXI4.scala:115:32]
wire idStall_91; // @[ToAXI4.scala:115:32]
wire idStall_92; // @[ToAXI4.scala:115:32]
wire idStall_93; // @[ToAXI4.scala:115:32]
wire idStall_94; // @[ToAXI4.scala:115:32]
wire idStall_95; // @[ToAXI4.scala:115:32]
wire idStall_96; // @[ToAXI4.scala:115:32]
wire idStall_97; // @[ToAXI4.scala:115:32]
wire idStall_98; // @[ToAXI4.scala:115:32]
wire idStall_99; // @[ToAXI4.scala:115:32]
wire idStall_100; // @[ToAXI4.scala:115:32]
wire idStall_101; // @[ToAXI4.scala:115:32]
wire idStall_102; // @[ToAXI4.scala:115:32]
wire idStall_103; // @[ToAXI4.scala:115:32]
wire idStall_104; // @[ToAXI4.scala:115:32]
wire idStall_105; // @[ToAXI4.scala:115:32]
wire idStall_106; // @[ToAXI4.scala:115:32]
wire idStall_107; // @[ToAXI4.scala:115:32]
wire idStall_108; // @[ToAXI4.scala:115:32]
wire idStall_109; // @[ToAXI4.scala:115:32]
wire idStall_110; // @[ToAXI4.scala:115:32]
wire idStall_111; // @[ToAXI4.scala:115:32]
wire idStall_112; // @[ToAXI4.scala:115:32]
wire idStall_113; // @[ToAXI4.scala:115:32]
wire idStall_114; // @[ToAXI4.scala:115:32]
wire idStall_115; // @[ToAXI4.scala:115:32]
wire idStall_116; // @[ToAXI4.scala:115:32]
wire idStall_117; // @[ToAXI4.scala:115:32]
wire idStall_118; // @[ToAXI4.scala:115:32]
wire idStall_119; // @[ToAXI4.scala:115:32]
wire idStall_120; // @[ToAXI4.scala:115:32]
wire idStall_121; // @[ToAXI4.scala:115:32]
wire idStall_122; // @[ToAXI4.scala:115:32]
wire idStall_123; // @[ToAXI4.scala:115:32]
wire idStall_124; // @[ToAXI4.scala:115:32]
wire idStall_125; // @[ToAXI4.scala:115:32]
wire idStall_126; // @[ToAXI4.scala:115:32]
wire idStall_127; // @[ToAXI4.scala:115:32]
wire idStall_128; // @[ToAXI4.scala:115:32]
wire idStall_129; // @[ToAXI4.scala:115:32]
wire idStall_130; // @[ToAXI4.scala:115:32]
wire idStall_131; // @[ToAXI4.scala:115:32]
wire idStall_132; // @[ToAXI4.scala:115:32]
wire idStall_133; // @[ToAXI4.scala:115:32]
wire idStall_134; // @[ToAXI4.scala:115:32]
wire idStall_135; // @[ToAXI4.scala:115:32]
wire idStall_136; // @[ToAXI4.scala:115:32]
wire idStall_137; // @[ToAXI4.scala:115:32]
wire idStall_138; // @[ToAXI4.scala:115:32]
wire idStall_139; // @[ToAXI4.scala:115:32]
wire idStall_140; // @[ToAXI4.scala:115:32]
wire idStall_141; // @[ToAXI4.scala:115:32]
wire idStall_142; // @[ToAXI4.scala:115:32]
wire idStall_143; // @[ToAXI4.scala:115:32]
wire idStall_144; // @[ToAXI4.scala:115:32]
wire idStall_145; // @[ToAXI4.scala:115:32]
wire idStall_146; // @[ToAXI4.scala:115:32]
wire idStall_147; // @[ToAXI4.scala:115:32]
wire idStall_148; // @[ToAXI4.scala:115:32]
wire idStall_149; // @[ToAXI4.scala:115:32]
wire idStall_150; // @[ToAXI4.scala:115:32]
wire idStall_151; // @[ToAXI4.scala:115:32]
wire idStall_152; // @[ToAXI4.scala:115:32]
wire idStall_153; // @[ToAXI4.scala:115:32]
wire idStall_154; // @[ToAXI4.scala:115:32]
wire idStall_155; // @[ToAXI4.scala:115:32]
wire idStall_156; // @[ToAXI4.scala:115:32]
wire idStall_157; // @[ToAXI4.scala:115:32]
wire idStall_158; // @[ToAXI4.scala:115:32]
wire idStall_159; // @[ToAXI4.scala:115:32]
wire idStall_160; // @[ToAXI4.scala:115:32]
wire idStall_161; // @[ToAXI4.scala:115:32]
wire idStall_162; // @[ToAXI4.scala:115:32]
wire idStall_163; // @[ToAXI4.scala:115:32]
wire idStall_164; // @[ToAXI4.scala:115:32]
wire idStall_165; // @[ToAXI4.scala:115:32]
wire idStall_166; // @[ToAXI4.scala:115:32]
wire idStall_167; // @[ToAXI4.scala:115:32]
wire idStall_168; // @[ToAXI4.scala:115:32]
wire idStall_169; // @[ToAXI4.scala:115:32]
wire idStall_170; // @[ToAXI4.scala:115:32]
wire idStall_171; // @[ToAXI4.scala:115:32]
wire idStall_172; // @[ToAXI4.scala:115:32]
wire idStall_173; // @[ToAXI4.scala:115:32]
wire idStall_174; // @[ToAXI4.scala:115:32]
wire idStall_175; // @[ToAXI4.scala:115:32]
wire idStall_176; // @[ToAXI4.scala:115:32]
wire idStall_177; // @[ToAXI4.scala:115:32]
wire idStall_178; // @[ToAXI4.scala:115:32]
wire idStall_179; // @[ToAXI4.scala:115:32]
wire idStall_180; // @[ToAXI4.scala:115:32]
wire idStall_181; // @[ToAXI4.scala:115:32]
wire idStall_182; // @[ToAXI4.scala:115:32]
wire idStall_183; // @[ToAXI4.scala:115:32]
wire idStall_184; // @[ToAXI4.scala:115:32]
wire idStall_185; // @[ToAXI4.scala:115:32]
wire idStall_186; // @[ToAXI4.scala:115:32]
wire idStall_187; // @[ToAXI4.scala:115:32]
wire idStall_188; // @[ToAXI4.scala:115:32]
wire idStall_189; // @[ToAXI4.scala:115:32]
wire idStall_190; // @[ToAXI4.scala:115:32]
wire idStall_191; // @[ToAXI4.scala:115:32]
wire idStall_192; // @[ToAXI4.scala:115:32]
wire idStall_193; // @[ToAXI4.scala:115:32]
wire idStall_194; // @[ToAXI4.scala:115:32]
wire idStall_195; // @[ToAXI4.scala:115:32]
wire idStall_196; // @[ToAXI4.scala:115:32]
wire idStall_197; // @[ToAXI4.scala:115:32]
wire idStall_198; // @[ToAXI4.scala:115:32]
wire idStall_199; // @[ToAXI4.scala:115:32]
wire idStall_200; // @[ToAXI4.scala:115:32]
wire idStall_201; // @[ToAXI4.scala:115:32]
wire idStall_202; // @[ToAXI4.scala:115:32]
wire idStall_203; // @[ToAXI4.scala:115:32]
wire idStall_204; // @[ToAXI4.scala:115:32]
wire idStall_205; // @[ToAXI4.scala:115:32]
wire idStall_206; // @[ToAXI4.scala:115:32]
wire idStall_207; // @[ToAXI4.scala:115:32]
wire idStall_208; // @[ToAXI4.scala:115:32]
wire idStall_209; // @[ToAXI4.scala:115:32]
wire idStall_210; // @[ToAXI4.scala:115:32]
wire idStall_211; // @[ToAXI4.scala:115:32]
wire idStall_212; // @[ToAXI4.scala:115:32]
wire idStall_213; // @[ToAXI4.scala:115:32]
wire idStall_214; // @[ToAXI4.scala:115:32]
wire idStall_215; // @[ToAXI4.scala:115:32]
wire idStall_216; // @[ToAXI4.scala:115:32]
wire idStall_217; // @[ToAXI4.scala:115:32]
wire idStall_218; // @[ToAXI4.scala:115:32]
wire idStall_219; // @[ToAXI4.scala:115:32]
wire idStall_220; // @[ToAXI4.scala:115:32]
wire idStall_221; // @[ToAXI4.scala:115:32]
wire idStall_222; // @[ToAXI4.scala:115:32]
wire idStall_223; // @[ToAXI4.scala:115:32]
wire idStall_224; // @[ToAXI4.scala:115:32]
wire idStall_225; // @[ToAXI4.scala:115:32]
wire idStall_226; // @[ToAXI4.scala:115:32]
wire idStall_227; // @[ToAXI4.scala:115:32]
wire idStall_228; // @[ToAXI4.scala:115:32]
wire idStall_229; // @[ToAXI4.scala:115:32]
wire idStall_230; // @[ToAXI4.scala:115:32]
wire idStall_231; // @[ToAXI4.scala:115:32]
wire idStall_232; // @[ToAXI4.scala:115:32]
wire idStall_233; // @[ToAXI4.scala:115:32]
wire idStall_234; // @[ToAXI4.scala:115:32]
wire idStall_235; // @[ToAXI4.scala:115:32]
wire idStall_236; // @[ToAXI4.scala:115:32]
wire idStall_237; // @[ToAXI4.scala:115:32]
wire idStall_238; // @[ToAXI4.scala:115:32]
wire idStall_239; // @[ToAXI4.scala:115:32]
wire idStall_240; // @[ToAXI4.scala:115:32]
wire idStall_241; // @[ToAXI4.scala:115:32]
wire idStall_242; // @[ToAXI4.scala:115:32]
wire idStall_243; // @[ToAXI4.scala:115:32]
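  // Per-source stall flags (ToAXI4.scala:113). In this instance each sourceStall_N simply
  // forwards idStall_N (see the assigns below), suggesting a one-to-one mapping between
  // TileLink source IDs and AXI IDs in this configuration.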
wire sourceStall_0; // @[ToAXI4.scala:113:36]
wire sourceStall_1; // @[ToAXI4.scala:113:36]
wire sourceStall_2; // @[ToAXI4.scala:113:36]
wire sourceStall_3; // @[ToAXI4.scala:113:36]
wire sourceStall_4; // @[ToAXI4.scala:113:36]
wire sourceStall_5; // @[ToAXI4.scala:113:36]
wire sourceStall_6; // @[ToAXI4.scala:113:36]
wire sourceStall_7; // @[ToAXI4.scala:113:36]
wire sourceStall_8; // @[ToAXI4.scala:113:36]
wire sourceStall_9; // @[ToAXI4.scala:113:36]
wire sourceStall_10; // @[ToAXI4.scala:113:36]
wire sourceStall_11; // @[ToAXI4.scala:113:36]
wire sourceStall_12; // @[ToAXI4.scala:113:36]
wire sourceStall_13; // @[ToAXI4.scala:113:36]
wire sourceStall_14; // @[ToAXI4.scala:113:36]
wire sourceStall_15; // @[ToAXI4.scala:113:36]
wire sourceStall_16; // @[ToAXI4.scala:113:36]
wire sourceStall_17; // @[ToAXI4.scala:113:36]
wire sourceStall_18; // @[ToAXI4.scala:113:36]
wire sourceStall_19; // @[ToAXI4.scala:113:36]
wire sourceStall_20; // @[ToAXI4.scala:113:36]
wire sourceStall_21; // @[ToAXI4.scala:113:36]
wire sourceStall_22; // @[ToAXI4.scala:113:36]
wire sourceStall_23; // @[ToAXI4.scala:113:36]
wire sourceStall_24; // @[ToAXI4.scala:113:36]
wire sourceStall_25; // @[ToAXI4.scala:113:36]
wire sourceStall_26; // @[ToAXI4.scala:113:36]
wire sourceStall_27; // @[ToAXI4.scala:113:36]
wire sourceStall_28; // @[ToAXI4.scala:113:36]
wire sourceStall_29; // @[ToAXI4.scala:113:36]
wire sourceStall_30; // @[ToAXI4.scala:113:36]
wire sourceStall_31; // @[ToAXI4.scala:113:36]
wire sourceStall_32; // @[ToAXI4.scala:113:36]
wire sourceStall_33; // @[ToAXI4.scala:113:36]
wire sourceStall_34; // @[ToAXI4.scala:113:36]
wire sourceStall_35; // @[ToAXI4.scala:113:36]
wire sourceStall_36; // @[ToAXI4.scala:113:36]
wire sourceStall_37; // @[ToAXI4.scala:113:36]
wire sourceStall_38; // @[ToAXI4.scala:113:36]
wire sourceStall_39; // @[ToAXI4.scala:113:36]
wire sourceStall_40; // @[ToAXI4.scala:113:36]
wire sourceStall_41; // @[ToAXI4.scala:113:36]
wire sourceStall_42; // @[ToAXI4.scala:113:36]
wire sourceStall_43; // @[ToAXI4.scala:113:36]
wire sourceStall_44; // @[ToAXI4.scala:113:36]
wire sourceStall_45; // @[ToAXI4.scala:113:36]
wire sourceStall_46; // @[ToAXI4.scala:113:36]
wire sourceStall_47; // @[ToAXI4.scala:113:36]
wire sourceStall_48; // @[ToAXI4.scala:113:36]
wire sourceStall_49; // @[ToAXI4.scala:113:36]
wire sourceStall_50; // @[ToAXI4.scala:113:36]
wire sourceStall_51; // @[ToAXI4.scala:113:36]
wire sourceStall_52; // @[ToAXI4.scala:113:36]
wire sourceStall_53; // @[ToAXI4.scala:113:36]
wire sourceStall_54; // @[ToAXI4.scala:113:36]
wire sourceStall_55; // @[ToAXI4.scala:113:36]
wire sourceStall_56; // @[ToAXI4.scala:113:36]
wire sourceStall_57; // @[ToAXI4.scala:113:36]
wire sourceStall_58; // @[ToAXI4.scala:113:36]
wire sourceStall_59; // @[ToAXI4.scala:113:36]
wire sourceStall_60; // @[ToAXI4.scala:113:36]
wire sourceStall_61; // @[ToAXI4.scala:113:36]
wire sourceStall_62; // @[ToAXI4.scala:113:36]
wire sourceStall_63; // @[ToAXI4.scala:113:36]
wire sourceStall_64; // @[ToAXI4.scala:113:36]
wire sourceStall_65; // @[ToAXI4.scala:113:36]
wire sourceStall_66; // @[ToAXI4.scala:113:36]
wire sourceStall_67; // @[ToAXI4.scala:113:36]
wire sourceStall_68; // @[ToAXI4.scala:113:36]
wire sourceStall_69; // @[ToAXI4.scala:113:36]
wire sourceStall_70; // @[ToAXI4.scala:113:36]
wire sourceStall_71; // @[ToAXI4.scala:113:36]
wire sourceStall_72; // @[ToAXI4.scala:113:36]
wire sourceStall_73; // @[ToAXI4.scala:113:36]
wire sourceStall_74; // @[ToAXI4.scala:113:36]
wire sourceStall_75; // @[ToAXI4.scala:113:36]
wire sourceStall_76; // @[ToAXI4.scala:113:36]
wire sourceStall_77; // @[ToAXI4.scala:113:36]
wire sourceStall_78; // @[ToAXI4.scala:113:36]
wire sourceStall_79; // @[ToAXI4.scala:113:36]
wire sourceStall_80; // @[ToAXI4.scala:113:36]
wire sourceStall_81; // @[ToAXI4.scala:113:36]
wire sourceStall_82; // @[ToAXI4.scala:113:36]
wire sourceStall_83; // @[ToAXI4.scala:113:36]
wire sourceStall_84; // @[ToAXI4.scala:113:36]
wire sourceStall_85; // @[ToAXI4.scala:113:36]
wire sourceStall_86; // @[ToAXI4.scala:113:36]
wire sourceStall_87; // @[ToAXI4.scala:113:36]
wire sourceStall_88; // @[ToAXI4.scala:113:36]
wire sourceStall_89; // @[ToAXI4.scala:113:36]
wire sourceStall_90; // @[ToAXI4.scala:113:36]
wire sourceStall_91; // @[ToAXI4.scala:113:36]
wire sourceStall_92; // @[ToAXI4.scala:113:36]
wire sourceStall_93; // @[ToAXI4.scala:113:36]
wire sourceStall_94; // @[ToAXI4.scala:113:36]
wire sourceStall_95; // @[ToAXI4.scala:113:36]
wire sourceStall_96; // @[ToAXI4.scala:113:36]
wire sourceStall_97; // @[ToAXI4.scala:113:36]
wire sourceStall_98; // @[ToAXI4.scala:113:36]
wire sourceStall_99; // @[ToAXI4.scala:113:36]
wire sourceStall_100; // @[ToAXI4.scala:113:36]
wire sourceStall_101; // @[ToAXI4.scala:113:36]
wire sourceStall_102; // @[ToAXI4.scala:113:36]
wire sourceStall_103; // @[ToAXI4.scala:113:36]
wire sourceStall_104; // @[ToAXI4.scala:113:36]
wire sourceStall_105; // @[ToAXI4.scala:113:36]
wire sourceStall_106; // @[ToAXI4.scala:113:36]
wire sourceStall_107; // @[ToAXI4.scala:113:36]
wire sourceStall_108; // @[ToAXI4.scala:113:36]
wire sourceStall_109; // @[ToAXI4.scala:113:36]
wire sourceStall_110; // @[ToAXI4.scala:113:36]
wire sourceStall_111; // @[ToAXI4.scala:113:36]
wire sourceStall_112; // @[ToAXI4.scala:113:36]
wire sourceStall_113; // @[ToAXI4.scala:113:36]
wire sourceStall_114; // @[ToAXI4.scala:113:36]
wire sourceStall_115; // @[ToAXI4.scala:113:36]
wire sourceStall_116; // @[ToAXI4.scala:113:36]
wire sourceStall_117; // @[ToAXI4.scala:113:36]
wire sourceStall_118; // @[ToAXI4.scala:113:36]
wire sourceStall_119; // @[ToAXI4.scala:113:36]
wire sourceStall_120; // @[ToAXI4.scala:113:36]
wire sourceStall_121; // @[ToAXI4.scala:113:36]
wire sourceStall_122; // @[ToAXI4.scala:113:36]
wire sourceStall_123; // @[ToAXI4.scala:113:36]
wire sourceStall_124; // @[ToAXI4.scala:113:36]
wire sourceStall_125; // @[ToAXI4.scala:113:36]
wire sourceStall_126; // @[ToAXI4.scala:113:36]
wire sourceStall_127; // @[ToAXI4.scala:113:36]
wire sourceStall_128; // @[ToAXI4.scala:113:36]
wire sourceStall_129; // @[ToAXI4.scala:113:36]
wire sourceStall_130; // @[ToAXI4.scala:113:36]
wire sourceStall_131; // @[ToAXI4.scala:113:36]
wire sourceStall_132; // @[ToAXI4.scala:113:36]
wire sourceStall_133; // @[ToAXI4.scala:113:36]
wire sourceStall_134; // @[ToAXI4.scala:113:36]
wire sourceStall_135; // @[ToAXI4.scala:113:36]
wire sourceStall_136; // @[ToAXI4.scala:113:36]
wire sourceStall_137; // @[ToAXI4.scala:113:36]
wire sourceStall_138; // @[ToAXI4.scala:113:36]
wire sourceStall_139; // @[ToAXI4.scala:113:36]
wire sourceStall_140; // @[ToAXI4.scala:113:36]
wire sourceStall_141; // @[ToAXI4.scala:113:36]
wire sourceStall_142; // @[ToAXI4.scala:113:36]
wire sourceStall_143; // @[ToAXI4.scala:113:36]
wire sourceStall_144; // @[ToAXI4.scala:113:36]
wire sourceStall_145; // @[ToAXI4.scala:113:36]
wire sourceStall_146; // @[ToAXI4.scala:113:36]
wire sourceStall_147; // @[ToAXI4.scala:113:36]
wire sourceStall_148; // @[ToAXI4.scala:113:36]
wire sourceStall_149; // @[ToAXI4.scala:113:36]
wire sourceStall_150; // @[ToAXI4.scala:113:36]
wire sourceStall_151; // @[ToAXI4.scala:113:36]
wire sourceStall_152; // @[ToAXI4.scala:113:36]
wire sourceStall_153; // @[ToAXI4.scala:113:36]
wire sourceStall_154; // @[ToAXI4.scala:113:36]
wire sourceStall_155; // @[ToAXI4.scala:113:36]
wire sourceStall_156; // @[ToAXI4.scala:113:36]
wire sourceStall_157; // @[ToAXI4.scala:113:36]
wire sourceStall_158; // @[ToAXI4.scala:113:36]
wire sourceStall_159; // @[ToAXI4.scala:113:36]
wire sourceStall_160; // @[ToAXI4.scala:113:36]
wire sourceStall_161; // @[ToAXI4.scala:113:36]
wire sourceStall_162; // @[ToAXI4.scala:113:36]
wire sourceStall_163; // @[ToAXI4.scala:113:36]
wire sourceStall_164; // @[ToAXI4.scala:113:36]
wire sourceStall_165; // @[ToAXI4.scala:113:36]
wire sourceStall_166; // @[ToAXI4.scala:113:36]
wire sourceStall_167; // @[ToAXI4.scala:113:36]
wire sourceStall_168; // @[ToAXI4.scala:113:36]
wire sourceStall_169; // @[ToAXI4.scala:113:36]
wire sourceStall_170; // @[ToAXI4.scala:113:36]
wire sourceStall_171; // @[ToAXI4.scala:113:36]
wire sourceStall_172; // @[ToAXI4.scala:113:36]
wire sourceStall_173; // @[ToAXI4.scala:113:36]
wire sourceStall_174; // @[ToAXI4.scala:113:36]
wire sourceStall_175; // @[ToAXI4.scala:113:36]
wire sourceStall_176; // @[ToAXI4.scala:113:36]
wire sourceStall_177; // @[ToAXI4.scala:113:36]
wire sourceStall_178; // @[ToAXI4.scala:113:36]
wire sourceStall_179; // @[ToAXI4.scala:113:36]
wire sourceStall_180; // @[ToAXI4.scala:113:36]
wire sourceStall_181; // @[ToAXI4.scala:113:36]
wire sourceStall_182; // @[ToAXI4.scala:113:36]
wire sourceStall_183; // @[ToAXI4.scala:113:36]
wire sourceStall_184; // @[ToAXI4.scala:113:36]
wire sourceStall_185; // @[ToAXI4.scala:113:36]
wire sourceStall_186; // @[ToAXI4.scala:113:36]
wire sourceStall_187; // @[ToAXI4.scala:113:36]
wire sourceStall_188; // @[ToAXI4.scala:113:36]
wire sourceStall_189; // @[ToAXI4.scala:113:36]
wire sourceStall_190; // @[ToAXI4.scala:113:36]
wire sourceStall_191; // @[ToAXI4.scala:113:36]
wire sourceStall_192; // @[ToAXI4.scala:113:36]
wire sourceStall_193; // @[ToAXI4.scala:113:36]
wire sourceStall_194; // @[ToAXI4.scala:113:36]
wire sourceStall_195; // @[ToAXI4.scala:113:36]
wire sourceStall_196; // @[ToAXI4.scala:113:36]
wire sourceStall_197; // @[ToAXI4.scala:113:36]
wire sourceStall_198; // @[ToAXI4.scala:113:36]
wire sourceStall_199; // @[ToAXI4.scala:113:36]
wire sourceStall_200; // @[ToAXI4.scala:113:36]
wire sourceStall_201; // @[ToAXI4.scala:113:36]
wire sourceStall_202; // @[ToAXI4.scala:113:36]
wire sourceStall_203; // @[ToAXI4.scala:113:36]
wire sourceStall_204; // @[ToAXI4.scala:113:36]
wire sourceStall_205; // @[ToAXI4.scala:113:36]
wire sourceStall_206; // @[ToAXI4.scala:113:36]
wire sourceStall_207; // @[ToAXI4.scala:113:36]
wire sourceStall_208; // @[ToAXI4.scala:113:36]
wire sourceStall_209; // @[ToAXI4.scala:113:36]
wire sourceStall_210; // @[ToAXI4.scala:113:36]
wire sourceStall_211; // @[ToAXI4.scala:113:36]
wire sourceStall_212; // @[ToAXI4.scala:113:36]
wire sourceStall_213; // @[ToAXI4.scala:113:36]
wire sourceStall_214; // @[ToAXI4.scala:113:36]
wire sourceStall_215; // @[ToAXI4.scala:113:36]
wire sourceStall_216; // @[ToAXI4.scala:113:36]
wire sourceStall_217; // @[ToAXI4.scala:113:36]
wire sourceStall_218; // @[ToAXI4.scala:113:36]
wire sourceStall_219; // @[ToAXI4.scala:113:36]
wire sourceStall_220; // @[ToAXI4.scala:113:36]
wire sourceStall_221; // @[ToAXI4.scala:113:36]
wire sourceStall_222; // @[ToAXI4.scala:113:36]
wire sourceStall_223; // @[ToAXI4.scala:113:36]
wire sourceStall_224; // @[ToAXI4.scala:113:36]
wire sourceStall_225; // @[ToAXI4.scala:113:36]
wire sourceStall_226; // @[ToAXI4.scala:113:36]
wire sourceStall_227; // @[ToAXI4.scala:113:36]
wire sourceStall_228; // @[ToAXI4.scala:113:36]
wire sourceStall_229; // @[ToAXI4.scala:113:36]
wire sourceStall_230; // @[ToAXI4.scala:113:36]
wire sourceStall_231; // @[ToAXI4.scala:113:36]
wire sourceStall_232; // @[ToAXI4.scala:113:36]
wire sourceStall_233; // @[ToAXI4.scala:113:36]
wire sourceStall_234; // @[ToAXI4.scala:113:36]
wire sourceStall_235; // @[ToAXI4.scala:113:36]
wire sourceStall_236; // @[ToAXI4.scala:113:36]
wire sourceStall_237; // @[ToAXI4.scala:113:36]
wire sourceStall_238; // @[ToAXI4.scala:113:36]
wire sourceStall_239; // @[ToAXI4.scala:113:36]
wire sourceStall_240; // @[ToAXI4.scala:113:36]
wire sourceStall_241; // @[ToAXI4.scala:113:36]
wire sourceStall_242; // @[ToAXI4.scala:113:36]
wire sourceStall_243; // @[ToAXI4.scala:113:36]
wire _idStall_0_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_0 = idStall_0; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_1_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_1 = idStall_1; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_2_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_2 = idStall_2; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_3_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_3 = idStall_3; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_4_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_4 = idStall_4; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_5_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_5 = idStall_5; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_6_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_6 = idStall_6; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_7_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_7 = idStall_7; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_8_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_8 = idStall_8; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_9_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_9 = idStall_9; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_10_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_10 = idStall_10; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_11_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_11 = idStall_11; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_12_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_12 = idStall_12; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_13_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_13 = idStall_13; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_14_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_14 = idStall_14; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_15_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_15 = idStall_15; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_16_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_16 = idStall_16; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_17_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_17 = idStall_17; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_18_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_18 = idStall_18; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_19_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_19 = idStall_19; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_20_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_20 = idStall_20; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_21_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_21 = idStall_21; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_22_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_22 = idStall_22; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_23_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_23 = idStall_23; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_24_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_24 = idStall_24; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_25_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_25 = idStall_25; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_26_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_26 = idStall_26; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_27_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_27 = idStall_27; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_28_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_28 = idStall_28; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_29_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_29 = idStall_29; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_30_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_30 = idStall_30; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_31_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_31 = idStall_31; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_32_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_32 = idStall_32; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_33_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_33 = idStall_33; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_34_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_34 = idStall_34; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_35_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_35 = idStall_35; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_36_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_36 = idStall_36; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_37_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_37 = idStall_37; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_38_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_38 = idStall_38; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_39_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_39 = idStall_39; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_40_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_40 = idStall_40; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_41_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_41 = idStall_41; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_42_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_42 = idStall_42; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_43_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_43 = idStall_43; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_44_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_44 = idStall_44; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_45_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_45 = idStall_45; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_46_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_46 = idStall_46; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_47_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_47 = idStall_47; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_48_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_48 = idStall_48; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_49_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_49 = idStall_49; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_50_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_50 = idStall_50; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_51_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_51 = idStall_51; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_52_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_52 = idStall_52; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_53_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_53 = idStall_53; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_54_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_54 = idStall_54; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_55_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_55 = idStall_55; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_56_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_56 = idStall_56; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_57_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_57 = idStall_57; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_58_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_58 = idStall_58; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_59_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_59 = idStall_59; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_60_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_60 = idStall_60; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_61_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_61 = idStall_61; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_62_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_62 = idStall_62; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_63_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_63 = idStall_63; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_64_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_64 = idStall_64; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_65_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_65 = idStall_65; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_66_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_66 = idStall_66; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_67_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_67 = idStall_67; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_68_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_68 = idStall_68; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_69_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_69 = idStall_69; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_70_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_70 = idStall_70; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_71_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_71 = idStall_71; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_72_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_72 = idStall_72; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_73_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_73 = idStall_73; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_74_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_74 = idStall_74; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_75_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_75 = idStall_75; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_76_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_76 = idStall_76; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_77_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_77 = idStall_77; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_78_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_78 = idStall_78; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_79_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_79 = idStall_79; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_80_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_80 = idStall_80; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_81_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_81 = idStall_81; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_82_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_82 = idStall_82; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_83_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_83 = idStall_83; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_84_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_84 = idStall_84; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_85_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_85 = idStall_85; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_86_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_86 = idStall_86; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_87_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_87 = idStall_87; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_88_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_88 = idStall_88; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_89_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_89 = idStall_89; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_90_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_90 = idStall_90; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_91_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_91 = idStall_91; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_92_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_92 = idStall_92; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_93_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_93 = idStall_93; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_94_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_94 = idStall_94; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_95_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_95 = idStall_95; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_96_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_96 = idStall_96; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_97_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_97 = idStall_97; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_98_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_98 = idStall_98; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_99_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_99 = idStall_99; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_100_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_100 = idStall_100; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_101_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_101 = idStall_101; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_102_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_102 = idStall_102; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_103_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_103 = idStall_103; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_104_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_104 = idStall_104; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_105_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_105 = idStall_105; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_106_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_106 = idStall_106; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_107_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_107 = idStall_107; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_108_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_108 = idStall_108; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_109_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_109 = idStall_109; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_110_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_110 = idStall_110; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_111_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_111 = idStall_111; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_112_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_112 = idStall_112; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_113_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_113 = idStall_113; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_114_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_114 = idStall_114; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_115_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_115 = idStall_115; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_116_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_116 = idStall_116; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_117_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_117 = idStall_117; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_118_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_118 = idStall_118; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_119_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_119 = idStall_119; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_120_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_120 = idStall_120; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_121_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_121 = idStall_121; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_122_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_122 = idStall_122; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_123_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_123 = idStall_123; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_124_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_124 = idStall_124; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_125_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_125 = idStall_125; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_126_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_126 = idStall_126; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_127_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_127 = idStall_127; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_128_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_128 = idStall_128; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_129_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_129 = idStall_129; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_130_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_130 = idStall_130; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_131_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_131 = idStall_131; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_132_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_132 = idStall_132; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_133_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_133 = idStall_133; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_134_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_134 = idStall_134; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_135_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_135 = idStall_135; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_136_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_136 = idStall_136; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_137_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_137 = idStall_137; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_138_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_138 = idStall_138; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_139_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_139 = idStall_139; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_140_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_140 = idStall_140; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_141_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_141 = idStall_141; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_142_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_142 = idStall_142; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_143_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_143 = idStall_143; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_144_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_144 = idStall_144; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_145_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_145 = idStall_145; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_146_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_146 = idStall_146; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_147_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_147 = idStall_147; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_148_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_148 = idStall_148; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_149_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_149 = idStall_149; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_150_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_150 = idStall_150; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_151_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_151 = idStall_151; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_152_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_152 = idStall_152; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_153_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_153 = idStall_153; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_154_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_154 = idStall_154; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_155_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_155 = idStall_155; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_156_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_156 = idStall_156; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_157_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_157 = idStall_157; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_158_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_158 = idStall_158; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_159_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_159 = idStall_159; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_160_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_160 = idStall_160; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_161_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_161 = idStall_161; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_162_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_162 = idStall_162; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_163_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_163 = idStall_163; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_164_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_164 = idStall_164; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_165_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_165 = idStall_165; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_166_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_166 = idStall_166; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_167_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_167 = idStall_167; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_168_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_168 = idStall_168; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_169_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_169 = idStall_169; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_170_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_170 = idStall_170; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_171_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_171 = idStall_171; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_172_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_172 = idStall_172; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_173_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_173 = idStall_173; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_174_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_174 = idStall_174; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_175_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_175 = idStall_175; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_176_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_176 = idStall_176; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_177_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_177 = idStall_177; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_178_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_178 = idStall_178; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_179_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_179 = idStall_179; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_180_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_180 = idStall_180; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_181_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_181 = idStall_181; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_182_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_182 = idStall_182; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_183_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_183 = idStall_183; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_184_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_184 = idStall_184; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_185_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_185 = idStall_185; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_186_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_186 = idStall_186; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_187_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_187 = idStall_187; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_188_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_188 = idStall_188; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_189_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_189 = idStall_189; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_190_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_190 = idStall_190; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_191_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_191 = idStall_191; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_192_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_192 = idStall_192; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_193_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_193 = idStall_193; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_194_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_194 = idStall_194; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_195_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_195 = idStall_195; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_196_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_196 = idStall_196; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_197_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_197 = idStall_197; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_198_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_198 = idStall_198; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_199_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_199 = idStall_199; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_200_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_200 = idStall_200; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_201_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_201 = idStall_201; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_202_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_202 = idStall_202; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_203_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_203 = idStall_203; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_204_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_204 = idStall_204; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_205_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_205 = idStall_205; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_206_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_206 = idStall_206; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_207_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_207 = idStall_207; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_208_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_208 = idStall_208; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_209_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_209 = idStall_209; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_210_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_210 = idStall_210; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_211_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_211 = idStall_211; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_212_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_212 = idStall_212; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_213_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_213 = idStall_213; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_214_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_214 = idStall_214; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_215_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_215 = idStall_215; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_216_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_216 = idStall_216; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_217_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_217 = idStall_217; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_218_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_218 = idStall_218; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_219_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_219 = idStall_219; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_220_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_220 = idStall_220; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_221_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_221 = idStall_221; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_222_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_222 = idStall_222; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_223_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_223 = idStall_223; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_224_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_224 = idStall_224; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_225_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_225 = idStall_225; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_226_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_226 = idStall_226; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_227_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_227 = idStall_227; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_228_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_228 = idStall_228; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_229_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_229 = idStall_229; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_230_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_230 = idStall_230; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_231_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_231 = idStall_231; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_232_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_232 = idStall_232; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_233_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_233 = idStall_233; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_234_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_234 = idStall_234; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_235_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_235 = idStall_235; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_236_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_236 = idStall_236; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_237_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_237 = idStall_237; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_238_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_238 = idStall_238; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_239_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_239 = idStall_239; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_240_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_240 = idStall_240; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_241_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_241 = idStall_241; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_242_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_242 = idStall_242; // @[ToAXI4.scala:113:36, :115:32]
wire _idStall_243_T_3; // @[ToAXI4.scala:286:34]
assign sourceStall_243 = idStall_243; // @[ToAXI4.scala:113:36, :115:32]
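  // TileLink A-channel beat accounting (Edges.scala): opcode[2] clear means the request carries
  // data (a Put), and the counter below derives a_first/a_last across a multi-beat burst.
  // a_last drives the AXI W-channel 'last' bit via out_w_bits_last.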
wire _a_isPut_opdata_T = nodeIn_a_bits_opcode[2]; // @[Edges.scala:92:37]
wire _r_beats1_opdata_T = nodeIn_a_bits_opcode[2]; // @[Edges.scala:92:37]
wire a_isPut = ~_a_isPut_opdata_T; // @[Edges.scala:92:{28,37}]
wire out_arw_bits_wen = a_isPut; // @[ToAXI4.scala:153:25]
wire _T_1 = nodeIn_a_ready & nodeIn_a_valid; // @[Decoupled.scala:51:35]
wire [12:0] _r_beats1_decode_T = 13'h3F << nodeIn_a_bits_size; // @[package.scala:243:71]
wire [5:0] _r_beats1_decode_T_1 = _r_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _r_beats1_decode_T_2 = ~_r_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] r_beats1_decode = _r_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire r_beats1_opdata = ~_r_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [2:0] r_beats1 = r_beats1_opdata ? r_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] r_counter; // @[Edges.scala:229:27]
wire [3:0] _r_counter1_T = {1'h0, r_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] r_counter1 = _r_counter1_T[2:0]; // @[Edges.scala:230:28]
wire a_first = r_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _r_last_T = r_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _r_last_T_1 = r_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_last = _r_last_T | _r_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire out_w_bits_last = a_last; // @[ToAXI4.scala:154:23]
wire r_3 = a_last & _T_1; // @[Decoupled.scala:51:35]
wire [2:0] _r_count_T = ~r_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] r_4 = r_beats1 & _r_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _r_counter_T = a_first ? r_beats1 : r_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire _out_arw_valid_T_5; // @[ToAXI4.scala:207:45]
wire [7:0] a_sel_shiftAmount = out_arw_bits_id; // @[OneHot.scala:64:49]
wire [7:0] _out_arw_bits_len_T_3; // @[ToAXI4.scala:174:84]
wire [2:0] _out_arw_bits_size_T_1; // @[ToAXI4.scala:175:23]
wire [3:0] out_arw_bits_echo_tl_state_size; // @[ToAXI4.scala:153:25]
wire [7:0] out_arw_bits_len; // @[ToAXI4.scala:153:25]
wire [2:0] out_arw_bits_size; // @[ToAXI4.scala:153:25]
wire out_arw_ready; // @[ToAXI4.scala:153:25]
wire out_arw_valid; // @[ToAXI4.scala:153:25]
wire _out_w_valid_T_4; // @[ToAXI4.scala:209:54]
wire out_w_ready; // @[ToAXI4.scala:154:23]
wire out_w_valid; // @[ToAXI4.scala:154:23]
assign nodeOut_w_valid = nodeOut_w_irr_valid; // @[Decoupled.scala:401:19]
assign nodeOut_w_bits_data = nodeOut_w_irr_bits_data; // @[Decoupled.scala:401:19]
assign nodeOut_w_bits_strb = nodeOut_w_irr_bits_strb; // @[Decoupled.scala:401:19]
assign nodeOut_w_bits_last = nodeOut_w_irr_bits_last; // @[Decoupled.scala:401:19]
wire _queue_arw_ready_T; // @[ToAXI4.scala:163:29]
assign nodeOut_aw_bits_id = queue_arw_bits_id; // @[Decoupled.scala:401:19]
assign nodeOut_ar_bits_id = queue_arw_bits_id; // @[Decoupled.scala:401:19]
assign nodeOut_aw_bits_addr = queue_arw_bits_addr; // @[Decoupled.scala:401:19]
assign nodeOut_ar_bits_addr = queue_arw_bits_addr; // @[Decoupled.scala:401:19]
assign nodeOut_aw_bits_len = queue_arw_bits_len; // @[Decoupled.scala:401:19]
assign nodeOut_ar_bits_len = queue_arw_bits_len; // @[Decoupled.scala:401:19]
assign nodeOut_aw_bits_size = queue_arw_bits_size; // @[Decoupled.scala:401:19]
assign nodeOut_ar_bits_size = queue_arw_bits_size; // @[Decoupled.scala:401:19]
assign nodeOut_aw_bits_burst = queue_arw_bits_burst; // @[Decoupled.scala:401:19]
assign nodeOut_ar_bits_burst = queue_arw_bits_burst; // @[Decoupled.scala:401:19]
assign nodeOut_aw_bits_lock = queue_arw_bits_lock; // @[Decoupled.scala:401:19]
assign nodeOut_ar_bits_lock = queue_arw_bits_lock; // @[Decoupled.scala:401:19]
assign nodeOut_aw_bits_cache = queue_arw_bits_cache; // @[Decoupled.scala:401:19]
assign nodeOut_ar_bits_cache = queue_arw_bits_cache; // @[Decoupled.scala:401:19]
assign nodeOut_aw_bits_prot = queue_arw_bits_prot; // @[Decoupled.scala:401:19]
assign nodeOut_ar_bits_prot = queue_arw_bits_prot; // @[Decoupled.scala:401:19]
assign nodeOut_aw_bits_qos = queue_arw_bits_qos; // @[Decoupled.scala:401:19]
assign nodeOut_ar_bits_qos = queue_arw_bits_qos; // @[Decoupled.scala:401:19]
assign nodeOut_aw_bits_echo_tl_state_size = queue_arw_bits_echo_tl_state_size; // @[Decoupled.scala:401:19]
assign nodeOut_ar_bits_echo_tl_state_size = queue_arw_bits_echo_tl_state_size; // @[Decoupled.scala:401:19]
assign nodeOut_aw_bits_echo_tl_state_source = queue_arw_bits_echo_tl_state_source; // @[Decoupled.scala:401:19]
assign nodeOut_ar_bits_echo_tl_state_source = queue_arw_bits_echo_tl_state_source; // @[Decoupled.scala:401:19]
wire queue_arw_bits_wen; // @[Decoupled.scala:401:19]
wire queue_arw_ready; // @[Decoupled.scala:401:19]
wire queue_arw_valid; // @[Decoupled.scala:401:19]
wire _nodeOut_ar_valid_T = ~queue_arw_bits_wen; // @[Decoupled.scala:401:19]
assign _nodeOut_ar_valid_T_1 = queue_arw_valid & _nodeOut_ar_valid_T; // @[Decoupled.scala:401:19]
assign nodeOut_ar_valid = _nodeOut_ar_valid_T_1; // @[ToAXI4.scala:161:39]
assign _nodeOut_aw_valid_T = queue_arw_valid & queue_arw_bits_wen; // @[Decoupled.scala:401:19]
assign nodeOut_aw_valid = _nodeOut_aw_valid_T; // @[ToAXI4.scala:162:39]
assign _queue_arw_ready_T = queue_arw_bits_wen ? nodeOut_aw_ready : nodeOut_ar_ready; // @[Decoupled.scala:401:19]
assign queue_arw_ready = _queue_arw_ready_T; // @[Decoupled.scala:401:19]
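  // AR and AW share one request queue (queue_arw): the 'wen' flag steers a dequeued entry to
  // either the AW channel (writes) or the AR channel (reads), and ready is muxed back from
  // whichever channel was selected (ToAXI4.scala:161-163).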
reg doneAW; // @[ToAXI4.scala:167:30]
wire _doneAW_T = ~a_last; // @[ToAXI4.scala:168:36]
assign out_arw_bits_id = _GEN[nodeIn_a_bits_source]; // @[ToAXI4.scala:153:25, :172:17]
wire [17:0] _out_arw_bits_len_T = 18'h7FF << nodeIn_a_bits_size; // @[package.scala:243:71]
wire [10:0] _out_arw_bits_len_T_1 = _out_arw_bits_len_T[10:0]; // @[package.scala:243:{71,76}]
wire [10:0] _out_arw_bits_len_T_2 = ~_out_arw_bits_len_T_1; // @[package.scala:243:{46,76}]
assign _out_arw_bits_len_T_3 = _out_arw_bits_len_T_2[10:3]; // @[package.scala:243:46]
assign out_arw_bits_len = _out_arw_bits_len_T_3; // @[ToAXI4.scala:153:25, :174:84]
wire _out_arw_bits_size_T = nodeIn_a_bits_size > 3'h2; // @[ToAXI4.scala:175:31]
assign _out_arw_bits_size_T_1 = _out_arw_bits_size_T ? 3'h3 : nodeIn_a_bits_size; // @[ToAXI4.scala:175:{23,31}]
assign out_arw_bits_size = _out_arw_bits_size_T_1; // @[ToAXI4.scala:153:25, :175:23]
assign out_arw_bits_echo_tl_state_size = {1'h0, nodeIn_a_bits_size}; // @[ToAXI4.scala:153:25, :189:22]
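  // Per-source stall lookup: the A-channel source ID indexes its sourceStall bit in the table
  // below; indices above 243 pad the 256-entry space with sourceStall_0.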
wire [255:0] _GEN_0 =
{{sourceStall_0},
{sourceStall_0},
{sourceStall_0},
{sourceStall_0},
{sourceStall_0},
{sourceStall_0},
{sourceStall_0},
{sourceStall_0},
{sourceStall_0},
{sourceStall_0},
{sourceStall_0},
{sourceStall_0},
{sourceStall_243},
{sourceStall_242},
{sourceStall_241},
{sourceStall_240},
{sourceStall_239},
{sourceStall_238},
{sourceStall_237},
{sourceStall_236},
{sourceStall_235},
{sourceStall_234},
{sourceStall_233},
{sourceStall_232},
{sourceStall_231},
{sourceStall_230},
{sourceStall_229},
{sourceStall_228},
{sourceStall_227},
{sourceStall_226},
{sourceStall_225},
{sourceStall_224},
{sourceStall_223},
{sourceStall_222},
{sourceStall_221},
{sourceStall_220},
{sourceStall_219},
{sourceStall_218},
{sourceStall_217},
{sourceStall_216},
{sourceStall_215},
{sourceStall_214},
{sourceStall_213},
{sourceStall_212},
{sourceStall_211},
{sourceStall_210},
{sourceStall_209},
{sourceStall_208},
{sourceStall_207},
{sourceStall_206},
{sourceStall_205},
{sourceStall_204},
{sourceStall_203},
{sourceStall_202},
{sourceStall_201},
{sourceStall_200},
{sourceStall_199},
{sourceStall_198},
{sourceStall_197},
{sourceStall_196},
{sourceStall_195},
{sourceStall_194},
{sourceStall_193},
{sourceStall_192},
{sourceStall_191},
{sourceStall_190},
{sourceStall_189},
{sourceStall_188},
{sourceStall_187},
{sourceStall_186},
{sourceStall_185},
{sourceStall_184},
{sourceStall_183},
{sourceStall_182},
{sourceStall_181},
{sourceStall_180},
{sourceStall_179},
{sourceStall_178},
{sourceStall_177},
{sourceStall_176},
{sourceStall_175},
{sourceStall_174},
{sourceStall_173},
{sourceStall_172},
{sourceStall_171},
{sourceStall_170},
{sourceStall_169},
{sourceStall_168},
{sourceStall_167},
{sourceStall_166},
{sourceStall_165},
{sourceStall_164},
{sourceStall_163},
{sourceStall_162},
{sourceStall_161},
{sourceStall_160},
{sourceStall_159},
{sourceStall_158},
{sourceStall_157},
{sourceStall_156},
{sourceStall_155},
{sourceStall_154},
{sourceStall_153},
{sourceStall_152},
{sourceStall_151},
{sourceStall_150},
{sourceStall_149},
{sourceStall_148},
{sourceStall_147},
{sourceStall_146},
{sourceStall_145},
{sourceStall_144},
{sourceStall_143},
{sourceStall_142},
{sourceStall_141},
{sourceStall_140},
{sourceStall_139},
{sourceStall_138},
{sourceStall_137},
{sourceStall_136},
{sourceStall_135},
{sourceStall_134},
{sourceStall_133},
{sourceStall_132},
{sourceStall_131},
{sourceStall_130},
{sourceStall_129},
{sourceStall_128},
{sourceStall_127},
{sourceStall_126},
{sourceStall_125},
{sourceStall_124},
{sourceStall_123},
{sourceStall_122},
{sourceStall_121},
{sourceStall_120},
{sourceStall_119},
{sourceStall_118},
{sourceStall_117},
{sourceStall_116},
{sourceStall_115},
{sourceStall_114},
{sourceStall_113},
{sourceStall_112},
{sourceStall_111},
{sourceStall_110},
{sourceStall_109},
{sourceStall_108},
{sourceStall_107},
{sourceStall_106},
{sourceStall_105},
{sourceStall_104},
{sourceStall_103},
{sourceStall_102},
{sourceStall_101},
{sourceStall_100},
{sourceStall_99},
{sourceStall_98},
{sourceStall_97},
{sourceStall_96},
{sourceStall_95},
{sourceStall_94},
{sourceStall_93},
{sourceStall_92},
{sourceStall_91},
{sourceStall_90},
{sourceStall_89},
{sourceStall_88},
{sourceStall_87},
{sourceStall_86},
{sourceStall_85},
{sourceStall_84},
{sourceStall_83},
{sourceStall_82},
{sourceStall_81},
{sourceStall_80},
{sourceStall_79},
{sourceStall_78},
{sourceStall_77},
{sourceStall_76},
{sourceStall_75},
{sourceStall_74},
{sourceStall_73},
{sourceStall_72},
{sourceStall_71},
{sourceStall_70},
{sourceStall_69},
{sourceStall_68},
{sourceStall_67},
{sourceStall_66},
{sourceStall_65},
{sourceStall_64},
{sourceStall_63},
{sourceStall_62},
{sourceStall_61},
{sourceStall_60},
{sourceStall_59},
{sourceStall_58},
{sourceStall_57},
{sourceStall_56},
{sourceStall_55},
{sourceStall_54},
{sourceStall_53},
{sourceStall_52},
{sourceStall_51},
{sourceStall_50},
{sourceStall_49},
{sourceStall_48},
{sourceStall_47},
{sourceStall_46},
{sourceStall_45},
{sourceStall_44},
{sourceStall_43},
{sourceStall_42},
{sourceStall_41},
{sourceStall_40},
{sourceStall_39},
{sourceStall_38},
{sourceStall_37},
{sourceStall_36},
{sourceStall_35},
{sourceStall_34},
{sourceStall_33},
{sourceStall_32},
{sourceStall_31},
{sourceStall_30},
{sourceStall_29},
{sourceStall_28},
{sourceStall_27},
{sourceStall_26},
{sourceStall_25},
{sourceStall_24},
{sourceStall_23},
{sourceStall_22},
{sourceStall_21},
{sourceStall_20},
{sourceStall_19},
{sourceStall_18},
{sourceStall_17},
{sourceStall_16},
{sourceStall_15},
{sourceStall_14},
{sourceStall_13},
{sourceStall_12},
{sourceStall_11},
{sourceStall_10},
{sourceStall_9},
{sourceStall_8},
{sourceStall_7},
{sourceStall_6},
{sourceStall_5},
{sourceStall_4},
{sourceStall_3},
{sourceStall_2},
{sourceStall_1},
{sourceStall_0}}; // @[ToAXI4.scala:113:36, :205:49]
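  // A-channel flow control: a stalled source blocks only the first beat of a request. Gets need
  // just an AR/AW slot; puts also need W-channel space, and the AW beat is issued only once per
  // burst (doneAW), after which the remaining W beats stream through.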
wire stall = _GEN_0[nodeIn_a_bits_source] & a_first; // @[ToAXI4.scala:205:49]
wire _nodeIn_a_ready_T = ~stall; // @[ToAXI4.scala:205:49, :206:21]
wire _GEN_1 = doneAW | out_arw_ready; // @[ToAXI4.scala:153:25, :167:30, :206:52]
wire _nodeIn_a_ready_T_1; // @[ToAXI4.scala:206:52]
assign _nodeIn_a_ready_T_1 = _GEN_1; // @[ToAXI4.scala:206:52]
wire _out_w_valid_T_3; // @[ToAXI4.scala:209:65]
assign _out_w_valid_T_3 = _GEN_1; // @[ToAXI4.scala:206:52, :209:65]
wire _nodeIn_a_ready_T_2 = _nodeIn_a_ready_T_1 & out_w_ready; // @[ToAXI4.scala:154:23, :206:{52,70}]
wire _nodeIn_a_ready_T_3 = a_isPut ? _nodeIn_a_ready_T_2 : out_arw_ready; // @[ToAXI4.scala:153:25, :206:{34,70}]
assign _nodeIn_a_ready_T_4 = _nodeIn_a_ready_T & _nodeIn_a_ready_T_3; // @[ToAXI4.scala:206:{21,28,34}]
assign nodeIn_a_ready = _nodeIn_a_ready_T_4; // @[ToAXI4.scala:206:28]
wire _out_arw_valid_T = ~stall; // @[ToAXI4.scala:205:49, :206:21, :207:24]
wire _out_arw_valid_T_1 = _out_arw_valid_T & nodeIn_a_valid; // @[ToAXI4.scala:207:{24,31}]
wire _out_arw_valid_T_2 = ~doneAW; // @[ToAXI4.scala:167:30, :207:61]
wire _out_arw_valid_T_3 = _out_arw_valid_T_2 & out_w_ready; // @[ToAXI4.scala:154:23, :207:{61,69}]
wire _out_arw_valid_T_4 = ~a_isPut | _out_arw_valid_T_3; // @[ToAXI4.scala:207:{51,69}]
assign _out_arw_valid_T_5 = _out_arw_valid_T_1 & _out_arw_valid_T_4; // @[ToAXI4.scala:207:{31,45,51}]
assign out_arw_valid = _out_arw_valid_T_5; // @[ToAXI4.scala:153:25, :207:45]
wire _out_w_valid_T = ~stall; // @[ToAXI4.scala:205:49, :206:21, :209:22]
wire _out_w_valid_T_1 = _out_w_valid_T & nodeIn_a_valid; // @[ToAXI4.scala:209:{22,29}]
wire _out_w_valid_T_2 = _out_w_valid_T_1 & a_isPut; // @[ToAXI4.scala:209:{29,43}]
assign _out_w_valid_T_4 = _out_w_valid_T_2 & _out_w_valid_T_3; // @[ToAXI4.scala:209:{43,54,65}]
assign out_w_valid = _out_w_valid_T_4; // @[ToAXI4.scala:154:23, :209:54]
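  // D-channel arbitration between AXI R and B: read data normally wins, r_holds_d keeps a
  // multi-beat R burst contiguous, and the b_delay counter (presumably counting cycles a B
  // response has been waiting) lets a pending write ack through once it reaches 7.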
reg r_holds_d; // @[ToAXI4.scala:216:30]
wire _r_holds_d_T = ~nodeOut_r_bits_last; // @[ToAXI4.scala:217:40]
reg [2:0] b_delay; // @[ToAXI4.scala:219:24]
wire [3:0] _b_delay_T = {1'h0, b_delay} + 4'h1; // @[ToAXI4.scala:219:24, :221:28]
wire [2:0] _b_delay_T_1 = _b_delay_T[2:0]; // @[ToAXI4.scala:221:28]
wire _r_wins_T = b_delay != 3'h7; // @[ToAXI4.scala:219:24, :225:44]
wire _r_wins_T_1 = nodeOut_r_valid & _r_wins_T; // @[ToAXI4.scala:225:{33,44}]
wire r_wins = _r_wins_T_1 | r_holds_d; // @[ToAXI4.scala:216:30, :225:{33,53}]
assign _nodeOut_r_ready_T = nodeIn_d_ready & r_wins; // @[ToAXI4.scala:225:53, :227:33]
assign nodeOut_r_ready = _nodeOut_r_ready_T; // @[ToAXI4.scala:227:33]
wire _nodeOut_b_ready_T = ~r_wins; // @[ToAXI4.scala:225:53, :228:36]
assign _nodeOut_b_ready_T_1 = nodeIn_d_ready & _nodeOut_b_ready_T; // @[ToAXI4.scala:228:{33,36}]
assign nodeOut_b_ready = _nodeOut_b_ready_T_1; // @[ToAXI4.scala:228:33]
assign _nodeIn_d_valid_T = r_wins ? nodeOut_r_valid : nodeOut_b_valid; // @[ToAXI4.scala:225:53, :229:24]
assign nodeIn_d_valid = _nodeIn_d_valid_T; // @[ToAXI4.scala:229:24]
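  // Error translation: r_denied (AXI DECERR) is sampled on the first R beat and held for the rest
  // of the burst; any nonzero R resp marks the data corrupt, and any nonzero B resp marks the
  // write denied.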
reg r_first; // @[ToAXI4.scala:234:28]
wire _r_denied_T = &nodeOut_r_bits_resp; // @[ToAXI4.scala:236:39]
reg r_denied_r; // @[package.scala:88:63]
wire r_denied = r_first ? _r_denied_T : r_denied_r; // @[package.scala:88:{42,63}]
wire r_d_denied = r_denied; // @[package.scala:88:42]
wire r_corrupt = |nodeOut_r_bits_resp; // @[ToAXI4.scala:237:39]
wire b_denied = |nodeOut_b_bits_resp; // @[ToAXI4.scala:238:39]
wire b_d_denied = b_denied; // @[ToAXI4.scala:238:39]
wire _r_d_T = r_corrupt | r_denied; // @[package.scala:88:42]
wire r_d_corrupt = _r_d_T; // @[ToAXI4.scala:240:96]
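  // Assemble the TileLink D beat: size (and source) come back through the AXI echo fields, and
  // the opcode is AccessAckData for read responses (r_wins) versus AccessAck for write acks.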
wire [2:0] r_d_size; // @[Edges.scala:810:17]
assign r_d_size = nodeOut_r_bits_echo_tl_state_size[2:0]; // @[Edges.scala:810:17, :813:15]
wire [2:0] b_d_size; // @[Edges.scala:792:17]
assign b_d_size = nodeOut_b_bits_echo_tl_state_size[2:0]; // @[Edges.scala:792:17, :795:15]
assign _nodeIn_d_bits_T_opcode = {2'h0, r_wins}; // @[ToAXI4.scala:225:53, :255:23]
assign _nodeIn_d_bits_T_size = r_wins ? r_d_size : b_d_size; // @[ToAXI4.scala:225:53, :255:23]
assign _nodeIn_d_bits_T_source = r_wins ? r_d_source : b_d_source; // @[ToAXI4.scala:225:53, :255:23]
assign _nodeIn_d_bits_T_denied = r_wins ? r_d_denied : b_d_denied; // @[ToAXI4.scala:225:53, :255:23]
assign _nodeIn_d_bits_T_corrupt = r_wins & r_d_corrupt; // @[ToAXI4.scala:225:53, :255:23]
assign nodeIn_d_bits_opcode = _nodeIn_d_bits_T_opcode; // @[ToAXI4.scala:255:23]
assign nodeIn_d_bits_size = _nodeIn_d_bits_T_size; // @[ToAXI4.scala:255:23]
assign nodeIn_d_bits_source = _nodeIn_d_bits_T_source; // @[ToAXI4.scala:255:23]
assign nodeIn_d_bits_denied = _nodeIn_d_bits_T_denied; // @[ToAXI4.scala:255:23]
assign nodeIn_d_bits_corrupt = _nodeIn_d_bits_T_corrupt; // @[ToAXI4.scala:255:23]
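  // One-hot decode of the issued request's AXI ID (a_sel, from a_sel_shiftAmount above); paired
  // with d_sel below, it drives the per-ID in-flight bookkeeping.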
wire [255:0] _a_sel_T = 256'h1 << a_sel_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [243:0] _a_sel_T_1 = _a_sel_T[243:0]; // @[OneHot.scala:65:{12,27}]
wire a_sel_0 = _a_sel_T_1[0]; // @[OneHot.scala:65:27]
wire a_sel_1 = _a_sel_T_1[1]; // @[OneHot.scala:65:27]
wire a_sel_2 = _a_sel_T_1[2]; // @[OneHot.scala:65:27]
wire a_sel_3 = _a_sel_T_1[3]; // @[OneHot.scala:65:27]
wire a_sel_4 = _a_sel_T_1[4]; // @[OneHot.scala:65:27]
wire a_sel_5 = _a_sel_T_1[5]; // @[OneHot.scala:65:27]
wire a_sel_6 = _a_sel_T_1[6]; // @[OneHot.scala:65:27]
wire a_sel_7 = _a_sel_T_1[7]; // @[OneHot.scala:65:27]
wire a_sel_8 = _a_sel_T_1[8]; // @[OneHot.scala:65:27]
wire a_sel_9 = _a_sel_T_1[9]; // @[OneHot.scala:65:27]
wire a_sel_10 = _a_sel_T_1[10]; // @[OneHot.scala:65:27]
wire a_sel_11 = _a_sel_T_1[11]; // @[OneHot.scala:65:27]
wire a_sel_12 = _a_sel_T_1[12]; // @[OneHot.scala:65:27]
wire a_sel_13 = _a_sel_T_1[13]; // @[OneHot.scala:65:27]
wire a_sel_14 = _a_sel_T_1[14]; // @[OneHot.scala:65:27]
wire a_sel_15 = _a_sel_T_1[15]; // @[OneHot.scala:65:27]
wire a_sel_16 = _a_sel_T_1[16]; // @[OneHot.scala:65:27]
wire a_sel_17 = _a_sel_T_1[17]; // @[OneHot.scala:65:27]
wire a_sel_18 = _a_sel_T_1[18]; // @[OneHot.scala:65:27]
wire a_sel_19 = _a_sel_T_1[19]; // @[OneHot.scala:65:27]
wire a_sel_20 = _a_sel_T_1[20]; // @[OneHot.scala:65:27]
wire a_sel_21 = _a_sel_T_1[21]; // @[OneHot.scala:65:27]
wire a_sel_22 = _a_sel_T_1[22]; // @[OneHot.scala:65:27]
wire a_sel_23 = _a_sel_T_1[23]; // @[OneHot.scala:65:27]
wire a_sel_24 = _a_sel_T_1[24]; // @[OneHot.scala:65:27]
wire a_sel_25 = _a_sel_T_1[25]; // @[OneHot.scala:65:27]
wire a_sel_26 = _a_sel_T_1[26]; // @[OneHot.scala:65:27]
wire a_sel_27 = _a_sel_T_1[27]; // @[OneHot.scala:65:27]
wire a_sel_28 = _a_sel_T_1[28]; // @[OneHot.scala:65:27]
wire a_sel_29 = _a_sel_T_1[29]; // @[OneHot.scala:65:27]
wire a_sel_30 = _a_sel_T_1[30]; // @[OneHot.scala:65:27]
wire a_sel_31 = _a_sel_T_1[31]; // @[OneHot.scala:65:27]
wire a_sel_32 = _a_sel_T_1[32]; // @[OneHot.scala:65:27]
wire a_sel_33 = _a_sel_T_1[33]; // @[OneHot.scala:65:27]
wire a_sel_34 = _a_sel_T_1[34]; // @[OneHot.scala:65:27]
wire a_sel_35 = _a_sel_T_1[35]; // @[OneHot.scala:65:27]
wire a_sel_36 = _a_sel_T_1[36]; // @[OneHot.scala:65:27]
wire a_sel_37 = _a_sel_T_1[37]; // @[OneHot.scala:65:27]
wire a_sel_38 = _a_sel_T_1[38]; // @[OneHot.scala:65:27]
wire a_sel_39 = _a_sel_T_1[39]; // @[OneHot.scala:65:27]
wire a_sel_40 = _a_sel_T_1[40]; // @[OneHot.scala:65:27]
wire a_sel_41 = _a_sel_T_1[41]; // @[OneHot.scala:65:27]
wire a_sel_42 = _a_sel_T_1[42]; // @[OneHot.scala:65:27]
wire a_sel_43 = _a_sel_T_1[43]; // @[OneHot.scala:65:27]
wire a_sel_44 = _a_sel_T_1[44]; // @[OneHot.scala:65:27]
wire a_sel_45 = _a_sel_T_1[45]; // @[OneHot.scala:65:27]
wire a_sel_46 = _a_sel_T_1[46]; // @[OneHot.scala:65:27]
wire a_sel_47 = _a_sel_T_1[47]; // @[OneHot.scala:65:27]
wire a_sel_48 = _a_sel_T_1[48]; // @[OneHot.scala:65:27]
wire a_sel_49 = _a_sel_T_1[49]; // @[OneHot.scala:65:27]
wire a_sel_50 = _a_sel_T_1[50]; // @[OneHot.scala:65:27]
wire a_sel_51 = _a_sel_T_1[51]; // @[OneHot.scala:65:27]
wire a_sel_52 = _a_sel_T_1[52]; // @[OneHot.scala:65:27]
wire a_sel_53 = _a_sel_T_1[53]; // @[OneHot.scala:65:27]
wire a_sel_54 = _a_sel_T_1[54]; // @[OneHot.scala:65:27]
wire a_sel_55 = _a_sel_T_1[55]; // @[OneHot.scala:65:27]
wire a_sel_56 = _a_sel_T_1[56]; // @[OneHot.scala:65:27]
wire a_sel_57 = _a_sel_T_1[57]; // @[OneHot.scala:65:27]
wire a_sel_58 = _a_sel_T_1[58]; // @[OneHot.scala:65:27]
wire a_sel_59 = _a_sel_T_1[59]; // @[OneHot.scala:65:27]
wire a_sel_60 = _a_sel_T_1[60]; // @[OneHot.scala:65:27]
wire a_sel_61 = _a_sel_T_1[61]; // @[OneHot.scala:65:27]
wire a_sel_62 = _a_sel_T_1[62]; // @[OneHot.scala:65:27]
wire a_sel_63 = _a_sel_T_1[63]; // @[OneHot.scala:65:27]
wire a_sel_64 = _a_sel_T_1[64]; // @[OneHot.scala:65:27]
wire a_sel_65 = _a_sel_T_1[65]; // @[OneHot.scala:65:27]
wire a_sel_66 = _a_sel_T_1[66]; // @[OneHot.scala:65:27]
wire a_sel_67 = _a_sel_T_1[67]; // @[OneHot.scala:65:27]
wire a_sel_68 = _a_sel_T_1[68]; // @[OneHot.scala:65:27]
wire a_sel_69 = _a_sel_T_1[69]; // @[OneHot.scala:65:27]
wire a_sel_70 = _a_sel_T_1[70]; // @[OneHot.scala:65:27]
wire a_sel_71 = _a_sel_T_1[71]; // @[OneHot.scala:65:27]
wire a_sel_72 = _a_sel_T_1[72]; // @[OneHot.scala:65:27]
wire a_sel_73 = _a_sel_T_1[73]; // @[OneHot.scala:65:27]
wire a_sel_74 = _a_sel_T_1[74]; // @[OneHot.scala:65:27]
wire a_sel_75 = _a_sel_T_1[75]; // @[OneHot.scala:65:27]
wire a_sel_76 = _a_sel_T_1[76]; // @[OneHot.scala:65:27]
wire a_sel_77 = _a_sel_T_1[77]; // @[OneHot.scala:65:27]
wire a_sel_78 = _a_sel_T_1[78]; // @[OneHot.scala:65:27]
wire a_sel_79 = _a_sel_T_1[79]; // @[OneHot.scala:65:27]
wire a_sel_80 = _a_sel_T_1[80]; // @[OneHot.scala:65:27]
wire a_sel_81 = _a_sel_T_1[81]; // @[OneHot.scala:65:27]
wire a_sel_82 = _a_sel_T_1[82]; // @[OneHot.scala:65:27]
wire a_sel_83 = _a_sel_T_1[83]; // @[OneHot.scala:65:27]
wire a_sel_84 = _a_sel_T_1[84]; // @[OneHot.scala:65:27]
wire a_sel_85 = _a_sel_T_1[85]; // @[OneHot.scala:65:27]
wire a_sel_86 = _a_sel_T_1[86]; // @[OneHot.scala:65:27]
wire a_sel_87 = _a_sel_T_1[87]; // @[OneHot.scala:65:27]
wire a_sel_88 = _a_sel_T_1[88]; // @[OneHot.scala:65:27]
wire a_sel_89 = _a_sel_T_1[89]; // @[OneHot.scala:65:27]
wire a_sel_90 = _a_sel_T_1[90]; // @[OneHot.scala:65:27]
wire a_sel_91 = _a_sel_T_1[91]; // @[OneHot.scala:65:27]
wire a_sel_92 = _a_sel_T_1[92]; // @[OneHot.scala:65:27]
wire a_sel_93 = _a_sel_T_1[93]; // @[OneHot.scala:65:27]
wire a_sel_94 = _a_sel_T_1[94]; // @[OneHot.scala:65:27]
wire a_sel_95 = _a_sel_T_1[95]; // @[OneHot.scala:65:27]
wire a_sel_96 = _a_sel_T_1[96]; // @[OneHot.scala:65:27]
wire a_sel_97 = _a_sel_T_1[97]; // @[OneHot.scala:65:27]
wire a_sel_98 = _a_sel_T_1[98]; // @[OneHot.scala:65:27]
wire a_sel_99 = _a_sel_T_1[99]; // @[OneHot.scala:65:27]
wire a_sel_100 = _a_sel_T_1[100]; // @[OneHot.scala:65:27]
wire a_sel_101 = _a_sel_T_1[101]; // @[OneHot.scala:65:27]
wire a_sel_102 = _a_sel_T_1[102]; // @[OneHot.scala:65:27]
wire a_sel_103 = _a_sel_T_1[103]; // @[OneHot.scala:65:27]
wire a_sel_104 = _a_sel_T_1[104]; // @[OneHot.scala:65:27]
wire a_sel_105 = _a_sel_T_1[105]; // @[OneHot.scala:65:27]
wire a_sel_106 = _a_sel_T_1[106]; // @[OneHot.scala:65:27]
wire a_sel_107 = _a_sel_T_1[107]; // @[OneHot.scala:65:27]
wire a_sel_108 = _a_sel_T_1[108]; // @[OneHot.scala:65:27]
wire a_sel_109 = _a_sel_T_1[109]; // @[OneHot.scala:65:27]
wire a_sel_110 = _a_sel_T_1[110]; // @[OneHot.scala:65:27]
wire a_sel_111 = _a_sel_T_1[111]; // @[OneHot.scala:65:27]
wire a_sel_112 = _a_sel_T_1[112]; // @[OneHot.scala:65:27]
wire a_sel_113 = _a_sel_T_1[113]; // @[OneHot.scala:65:27]
wire a_sel_114 = _a_sel_T_1[114]; // @[OneHot.scala:65:27]
wire a_sel_115 = _a_sel_T_1[115]; // @[OneHot.scala:65:27]
wire a_sel_116 = _a_sel_T_1[116]; // @[OneHot.scala:65:27]
wire a_sel_117 = _a_sel_T_1[117]; // @[OneHot.scala:65:27]
wire a_sel_118 = _a_sel_T_1[118]; // @[OneHot.scala:65:27]
wire a_sel_119 = _a_sel_T_1[119]; // @[OneHot.scala:65:27]
wire a_sel_120 = _a_sel_T_1[120]; // @[OneHot.scala:65:27]
wire a_sel_121 = _a_sel_T_1[121]; // @[OneHot.scala:65:27]
wire a_sel_122 = _a_sel_T_1[122]; // @[OneHot.scala:65:27]
wire a_sel_123 = _a_sel_T_1[123]; // @[OneHot.scala:65:27]
wire a_sel_124 = _a_sel_T_1[124]; // @[OneHot.scala:65:27]
wire a_sel_125 = _a_sel_T_1[125]; // @[OneHot.scala:65:27]
wire a_sel_126 = _a_sel_T_1[126]; // @[OneHot.scala:65:27]
wire a_sel_127 = _a_sel_T_1[127]; // @[OneHot.scala:65:27]
wire a_sel_128 = _a_sel_T_1[128]; // @[OneHot.scala:65:27]
wire a_sel_129 = _a_sel_T_1[129]; // @[OneHot.scala:65:27]
wire a_sel_130 = _a_sel_T_1[130]; // @[OneHot.scala:65:27]
wire a_sel_131 = _a_sel_T_1[131]; // @[OneHot.scala:65:27]
wire a_sel_132 = _a_sel_T_1[132]; // @[OneHot.scala:65:27]
wire a_sel_133 = _a_sel_T_1[133]; // @[OneHot.scala:65:27]
wire a_sel_134 = _a_sel_T_1[134]; // @[OneHot.scala:65:27]
wire a_sel_135 = _a_sel_T_1[135]; // @[OneHot.scala:65:27]
wire a_sel_136 = _a_sel_T_1[136]; // @[OneHot.scala:65:27]
wire a_sel_137 = _a_sel_T_1[137]; // @[OneHot.scala:65:27]
wire a_sel_138 = _a_sel_T_1[138]; // @[OneHot.scala:65:27]
wire a_sel_139 = _a_sel_T_1[139]; // @[OneHot.scala:65:27]
wire a_sel_140 = _a_sel_T_1[140]; // @[OneHot.scala:65:27]
wire a_sel_141 = _a_sel_T_1[141]; // @[OneHot.scala:65:27]
wire a_sel_142 = _a_sel_T_1[142]; // @[OneHot.scala:65:27]
wire a_sel_143 = _a_sel_T_1[143]; // @[OneHot.scala:65:27]
wire a_sel_144 = _a_sel_T_1[144]; // @[OneHot.scala:65:27]
wire a_sel_145 = _a_sel_T_1[145]; // @[OneHot.scala:65:27]
wire a_sel_146 = _a_sel_T_1[146]; // @[OneHot.scala:65:27]
wire a_sel_147 = _a_sel_T_1[147]; // @[OneHot.scala:65:27]
wire a_sel_148 = _a_sel_T_1[148]; // @[OneHot.scala:65:27]
wire a_sel_149 = _a_sel_T_1[149]; // @[OneHot.scala:65:27]
wire a_sel_150 = _a_sel_T_1[150]; // @[OneHot.scala:65:27]
wire a_sel_151 = _a_sel_T_1[151]; // @[OneHot.scala:65:27]
wire a_sel_152 = _a_sel_T_1[152]; // @[OneHot.scala:65:27]
wire a_sel_153 = _a_sel_T_1[153]; // @[OneHot.scala:65:27]
wire a_sel_154 = _a_sel_T_1[154]; // @[OneHot.scala:65:27]
wire a_sel_155 = _a_sel_T_1[155]; // @[OneHot.scala:65:27]
wire a_sel_156 = _a_sel_T_1[156]; // @[OneHot.scala:65:27]
wire a_sel_157 = _a_sel_T_1[157]; // @[OneHot.scala:65:27]
wire a_sel_158 = _a_sel_T_1[158]; // @[OneHot.scala:65:27]
wire a_sel_159 = _a_sel_T_1[159]; // @[OneHot.scala:65:27]
wire a_sel_160 = _a_sel_T_1[160]; // @[OneHot.scala:65:27]
wire a_sel_161 = _a_sel_T_1[161]; // @[OneHot.scala:65:27]
wire a_sel_162 = _a_sel_T_1[162]; // @[OneHot.scala:65:27]
wire a_sel_163 = _a_sel_T_1[163]; // @[OneHot.scala:65:27]
wire a_sel_164 = _a_sel_T_1[164]; // @[OneHot.scala:65:27]
wire a_sel_165 = _a_sel_T_1[165]; // @[OneHot.scala:65:27]
wire a_sel_166 = _a_sel_T_1[166]; // @[OneHot.scala:65:27]
wire a_sel_167 = _a_sel_T_1[167]; // @[OneHot.scala:65:27]
wire a_sel_168 = _a_sel_T_1[168]; // @[OneHot.scala:65:27]
wire a_sel_169 = _a_sel_T_1[169]; // @[OneHot.scala:65:27]
wire a_sel_170 = _a_sel_T_1[170]; // @[OneHot.scala:65:27]
wire a_sel_171 = _a_sel_T_1[171]; // @[OneHot.scala:65:27]
wire a_sel_172 = _a_sel_T_1[172]; // @[OneHot.scala:65:27]
wire a_sel_173 = _a_sel_T_1[173]; // @[OneHot.scala:65:27]
wire a_sel_174 = _a_sel_T_1[174]; // @[OneHot.scala:65:27]
wire a_sel_175 = _a_sel_T_1[175]; // @[OneHot.scala:65:27]
wire a_sel_176 = _a_sel_T_1[176]; // @[OneHot.scala:65:27]
wire a_sel_177 = _a_sel_T_1[177]; // @[OneHot.scala:65:27]
wire a_sel_178 = _a_sel_T_1[178]; // @[OneHot.scala:65:27]
wire a_sel_179 = _a_sel_T_1[179]; // @[OneHot.scala:65:27]
wire a_sel_180 = _a_sel_T_1[180]; // @[OneHot.scala:65:27]
wire a_sel_181 = _a_sel_T_1[181]; // @[OneHot.scala:65:27]
wire a_sel_182 = _a_sel_T_1[182]; // @[OneHot.scala:65:27]
wire a_sel_183 = _a_sel_T_1[183]; // @[OneHot.scala:65:27]
wire a_sel_184 = _a_sel_T_1[184]; // @[OneHot.scala:65:27]
wire a_sel_185 = _a_sel_T_1[185]; // @[OneHot.scala:65:27]
wire a_sel_186 = _a_sel_T_1[186]; // @[OneHot.scala:65:27]
wire a_sel_187 = _a_sel_T_1[187]; // @[OneHot.scala:65:27]
wire a_sel_188 = _a_sel_T_1[188]; // @[OneHot.scala:65:27]
wire a_sel_189 = _a_sel_T_1[189]; // @[OneHot.scala:65:27]
wire a_sel_190 = _a_sel_T_1[190]; // @[OneHot.scala:65:27]
wire a_sel_191 = _a_sel_T_1[191]; // @[OneHot.scala:65:27]
wire a_sel_192 = _a_sel_T_1[192]; // @[OneHot.scala:65:27]
wire a_sel_193 = _a_sel_T_1[193]; // @[OneHot.scala:65:27]
wire a_sel_194 = _a_sel_T_1[194]; // @[OneHot.scala:65:27]
wire a_sel_195 = _a_sel_T_1[195]; // @[OneHot.scala:65:27]
wire a_sel_196 = _a_sel_T_1[196]; // @[OneHot.scala:65:27]
wire a_sel_197 = _a_sel_T_1[197]; // @[OneHot.scala:65:27]
wire a_sel_198 = _a_sel_T_1[198]; // @[OneHot.scala:65:27]
wire a_sel_199 = _a_sel_T_1[199]; // @[OneHot.scala:65:27]
wire a_sel_200 = _a_sel_T_1[200]; // @[OneHot.scala:65:27]
wire a_sel_201 = _a_sel_T_1[201]; // @[OneHot.scala:65:27]
wire a_sel_202 = _a_sel_T_1[202]; // @[OneHot.scala:65:27]
wire a_sel_203 = _a_sel_T_1[203]; // @[OneHot.scala:65:27]
wire a_sel_204 = _a_sel_T_1[204]; // @[OneHot.scala:65:27]
wire a_sel_205 = _a_sel_T_1[205]; // @[OneHot.scala:65:27]
wire a_sel_206 = _a_sel_T_1[206]; // @[OneHot.scala:65:27]
wire a_sel_207 = _a_sel_T_1[207]; // @[OneHot.scala:65:27]
wire a_sel_208 = _a_sel_T_1[208]; // @[OneHot.scala:65:27]
wire a_sel_209 = _a_sel_T_1[209]; // @[OneHot.scala:65:27]
wire a_sel_210 = _a_sel_T_1[210]; // @[OneHot.scala:65:27]
wire a_sel_211 = _a_sel_T_1[211]; // @[OneHot.scala:65:27]
wire a_sel_212 = _a_sel_T_1[212]; // @[OneHot.scala:65:27]
wire a_sel_213 = _a_sel_T_1[213]; // @[OneHot.scala:65:27]
wire a_sel_214 = _a_sel_T_1[214]; // @[OneHot.scala:65:27]
wire a_sel_215 = _a_sel_T_1[215]; // @[OneHot.scala:65:27]
wire a_sel_216 = _a_sel_T_1[216]; // @[OneHot.scala:65:27]
wire a_sel_217 = _a_sel_T_1[217]; // @[OneHot.scala:65:27]
wire a_sel_218 = _a_sel_T_1[218]; // @[OneHot.scala:65:27]
wire a_sel_219 = _a_sel_T_1[219]; // @[OneHot.scala:65:27]
wire a_sel_220 = _a_sel_T_1[220]; // @[OneHot.scala:65:27]
wire a_sel_221 = _a_sel_T_1[221]; // @[OneHot.scala:65:27]
wire a_sel_222 = _a_sel_T_1[222]; // @[OneHot.scala:65:27]
wire a_sel_223 = _a_sel_T_1[223]; // @[OneHot.scala:65:27]
wire a_sel_224 = _a_sel_T_1[224]; // @[OneHot.scala:65:27]
wire a_sel_225 = _a_sel_T_1[225]; // @[OneHot.scala:65:27]
wire a_sel_226 = _a_sel_T_1[226]; // @[OneHot.scala:65:27]
wire a_sel_227 = _a_sel_T_1[227]; // @[OneHot.scala:65:27]
wire a_sel_228 = _a_sel_T_1[228]; // @[OneHot.scala:65:27]
wire a_sel_229 = _a_sel_T_1[229]; // @[OneHot.scala:65:27]
wire a_sel_230 = _a_sel_T_1[230]; // @[OneHot.scala:65:27]
wire a_sel_231 = _a_sel_T_1[231]; // @[OneHot.scala:65:27]
wire a_sel_232 = _a_sel_T_1[232]; // @[OneHot.scala:65:27]
wire a_sel_233 = _a_sel_T_1[233]; // @[OneHot.scala:65:27]
wire a_sel_234 = _a_sel_T_1[234]; // @[OneHot.scala:65:27]
wire a_sel_235 = _a_sel_T_1[235]; // @[OneHot.scala:65:27]
wire a_sel_236 = _a_sel_T_1[236]; // @[OneHot.scala:65:27]
wire a_sel_237 = _a_sel_T_1[237]; // @[OneHot.scala:65:27]
wire a_sel_238 = _a_sel_T_1[238]; // @[OneHot.scala:65:27]
wire a_sel_239 = _a_sel_T_1[239]; // @[OneHot.scala:65:27]
wire a_sel_240 = _a_sel_T_1[240]; // @[OneHot.scala:65:27]
wire a_sel_241 = _a_sel_T_1[241]; // @[OneHot.scala:65:27]
wire a_sel_242 = _a_sel_T_1[242]; // @[OneHot.scala:65:27]
wire a_sel_243 = _a_sel_T_1[243]; // @[OneHot.scala:65:27]
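  // One-hot decode of the returning response's ID: the R ID when r_wins, otherwise the B ID.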
wire [7:0] _d_sel_T = r_wins ? nodeOut_r_bits_id : nodeOut_b_bits_id; // @[ToAXI4.scala:225:53, :261:31]
wire [7:0] d_sel_shiftAmount = _d_sel_T; // @[OneHot.scala:64:49]
wire [255:0] _d_sel_T_1 = 256'h1 << d_sel_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [243:0] _d_sel_T_2 = _d_sel_T_1[243:0]; // @[OneHot.scala:65:{12,27}]
wire d_sel_0 = _d_sel_T_2[0]; // @[OneHot.scala:65:27]
wire d_sel_1 = _d_sel_T_2[1]; // @[OneHot.scala:65:27]
wire d_sel_2 = _d_sel_T_2[2]; // @[OneHot.scala:65:27]
wire d_sel_3 = _d_sel_T_2[3]; // @[OneHot.scala:65:27]
wire d_sel_4 = _d_sel_T_2[4]; // @[OneHot.scala:65:27]
wire d_sel_5 = _d_sel_T_2[5]; // @[OneHot.scala:65:27]
wire d_sel_6 = _d_sel_T_2[6]; // @[OneHot.scala:65:27]
wire d_sel_7 = _d_sel_T_2[7]; // @[OneHot.scala:65:27]
wire d_sel_8 = _d_sel_T_2[8]; // @[OneHot.scala:65:27]
wire d_sel_9 = _d_sel_T_2[9]; // @[OneHot.scala:65:27]
wire d_sel_10 = _d_sel_T_2[10]; // @[OneHot.scala:65:27]
wire d_sel_11 = _d_sel_T_2[11]; // @[OneHot.scala:65:27]
wire d_sel_12 = _d_sel_T_2[12]; // @[OneHot.scala:65:27]
wire d_sel_13 = _d_sel_T_2[13]; // @[OneHot.scala:65:27]
wire d_sel_14 = _d_sel_T_2[14]; // @[OneHot.scala:65:27]
wire d_sel_15 = _d_sel_T_2[15]; // @[OneHot.scala:65:27]
wire d_sel_16 = _d_sel_T_2[16]; // @[OneHot.scala:65:27]
wire d_sel_17 = _d_sel_T_2[17]; // @[OneHot.scala:65:27]
wire d_sel_18 = _d_sel_T_2[18]; // @[OneHot.scala:65:27]
wire d_sel_19 = _d_sel_T_2[19]; // @[OneHot.scala:65:27]
wire d_sel_20 = _d_sel_T_2[20]; // @[OneHot.scala:65:27]
wire d_sel_21 = _d_sel_T_2[21]; // @[OneHot.scala:65:27]
wire d_sel_22 = _d_sel_T_2[22]; // @[OneHot.scala:65:27]
wire d_sel_23 = _d_sel_T_2[23]; // @[OneHot.scala:65:27]
wire d_sel_24 = _d_sel_T_2[24]; // @[OneHot.scala:65:27]
wire d_sel_25 = _d_sel_T_2[25]; // @[OneHot.scala:65:27]
wire d_sel_26 = _d_sel_T_2[26]; // @[OneHot.scala:65:27]
wire d_sel_27 = _d_sel_T_2[27]; // @[OneHot.scala:65:27]
wire d_sel_28 = _d_sel_T_2[28]; // @[OneHot.scala:65:27]
wire d_sel_29 = _d_sel_T_2[29]; // @[OneHot.scala:65:27]
wire d_sel_30 = _d_sel_T_2[30]; // @[OneHot.scala:65:27]
wire d_sel_31 = _d_sel_T_2[31]; // @[OneHot.scala:65:27]
wire d_sel_32 = _d_sel_T_2[32]; // @[OneHot.scala:65:27]
wire d_sel_33 = _d_sel_T_2[33]; // @[OneHot.scala:65:27]
wire d_sel_34 = _d_sel_T_2[34]; // @[OneHot.scala:65:27]
wire d_sel_35 = _d_sel_T_2[35]; // @[OneHot.scala:65:27]
wire d_sel_36 = _d_sel_T_2[36]; // @[OneHot.scala:65:27]
wire d_sel_37 = _d_sel_T_2[37]; // @[OneHot.scala:65:27]
wire d_sel_38 = _d_sel_T_2[38]; // @[OneHot.scala:65:27]
wire d_sel_39 = _d_sel_T_2[39]; // @[OneHot.scala:65:27]
wire d_sel_40 = _d_sel_T_2[40]; // @[OneHot.scala:65:27]
wire d_sel_41 = _d_sel_T_2[41]; // @[OneHot.scala:65:27]
wire d_sel_42 = _d_sel_T_2[42]; // @[OneHot.scala:65:27]
wire d_sel_43 = _d_sel_T_2[43]; // @[OneHot.scala:65:27]
wire d_sel_44 = _d_sel_T_2[44]; // @[OneHot.scala:65:27]
wire d_sel_45 = _d_sel_T_2[45]; // @[OneHot.scala:65:27]
wire d_sel_46 = _d_sel_T_2[46]; // @[OneHot.scala:65:27]
wire d_sel_47 = _d_sel_T_2[47]; // @[OneHot.scala:65:27]
wire d_sel_48 = _d_sel_T_2[48]; // @[OneHot.scala:65:27]
wire d_sel_49 = _d_sel_T_2[49]; // @[OneHot.scala:65:27]
wire d_sel_50 = _d_sel_T_2[50]; // @[OneHot.scala:65:27]
wire d_sel_51 = _d_sel_T_2[51]; // @[OneHot.scala:65:27]
wire d_sel_52 = _d_sel_T_2[52]; // @[OneHot.scala:65:27]
wire d_sel_53 = _d_sel_T_2[53]; // @[OneHot.scala:65:27]
wire d_sel_54 = _d_sel_T_2[54]; // @[OneHot.scala:65:27]
wire d_sel_55 = _d_sel_T_2[55]; // @[OneHot.scala:65:27]
wire d_sel_56 = _d_sel_T_2[56]; // @[OneHot.scala:65:27]
wire d_sel_57 = _d_sel_T_2[57]; // @[OneHot.scala:65:27]
wire d_sel_58 = _d_sel_T_2[58]; // @[OneHot.scala:65:27]
wire d_sel_59 = _d_sel_T_2[59]; // @[OneHot.scala:65:27]
wire d_sel_60 = _d_sel_T_2[60]; // @[OneHot.scala:65:27]
wire d_sel_61 = _d_sel_T_2[61]; // @[OneHot.scala:65:27]
wire d_sel_62 = _d_sel_T_2[62]; // @[OneHot.scala:65:27]
wire d_sel_63 = _d_sel_T_2[63]; // @[OneHot.scala:65:27]
wire d_sel_64 = _d_sel_T_2[64]; // @[OneHot.scala:65:27]
wire d_sel_65 = _d_sel_T_2[65]; // @[OneHot.scala:65:27]
wire d_sel_66 = _d_sel_T_2[66]; // @[OneHot.scala:65:27]
wire d_sel_67 = _d_sel_T_2[67]; // @[OneHot.scala:65:27]
wire d_sel_68 = _d_sel_T_2[68]; // @[OneHot.scala:65:27]
wire d_sel_69 = _d_sel_T_2[69]; // @[OneHot.scala:65:27]
wire d_sel_70 = _d_sel_T_2[70]; // @[OneHot.scala:65:27]
wire d_sel_71 = _d_sel_T_2[71]; // @[OneHot.scala:65:27]
wire d_sel_72 = _d_sel_T_2[72]; // @[OneHot.scala:65:27]
wire d_sel_73 = _d_sel_T_2[73]; // @[OneHot.scala:65:27]
wire d_sel_74 = _d_sel_T_2[74]; // @[OneHot.scala:65:27]
wire d_sel_75 = _d_sel_T_2[75]; // @[OneHot.scala:65:27]
wire d_sel_76 = _d_sel_T_2[76]; // @[OneHot.scala:65:27]
wire d_sel_77 = _d_sel_T_2[77]; // @[OneHot.scala:65:27]
wire d_sel_78 = _d_sel_T_2[78]; // @[OneHot.scala:65:27]
wire d_sel_79 = _d_sel_T_2[79]; // @[OneHot.scala:65:27]
wire d_sel_80 = _d_sel_T_2[80]; // @[OneHot.scala:65:27]
wire d_sel_81 = _d_sel_T_2[81]; // @[OneHot.scala:65:27]
wire d_sel_82 = _d_sel_T_2[82]; // @[OneHot.scala:65:27]
wire d_sel_83 = _d_sel_T_2[83]; // @[OneHot.scala:65:27]
wire d_sel_84 = _d_sel_T_2[84]; // @[OneHot.scala:65:27]
wire d_sel_85 = _d_sel_T_2[85]; // @[OneHot.scala:65:27]
wire d_sel_86 = _d_sel_T_2[86]; // @[OneHot.scala:65:27]
wire d_sel_87 = _d_sel_T_2[87]; // @[OneHot.scala:65:27]
wire d_sel_88 = _d_sel_T_2[88]; // @[OneHot.scala:65:27]
wire d_sel_89 = _d_sel_T_2[89]; // @[OneHot.scala:65:27]
wire d_sel_90 = _d_sel_T_2[90]; // @[OneHot.scala:65:27]
wire d_sel_91 = _d_sel_T_2[91]; // @[OneHot.scala:65:27]
wire d_sel_92 = _d_sel_T_2[92]; // @[OneHot.scala:65:27]
wire d_sel_93 = _d_sel_T_2[93]; // @[OneHot.scala:65:27]
wire d_sel_94 = _d_sel_T_2[94]; // @[OneHot.scala:65:27]
wire d_sel_95 = _d_sel_T_2[95]; // @[OneHot.scala:65:27]
wire d_sel_96 = _d_sel_T_2[96]; // @[OneHot.scala:65:27]
wire d_sel_97 = _d_sel_T_2[97]; // @[OneHot.scala:65:27]
wire d_sel_98 = _d_sel_T_2[98]; // @[OneHot.scala:65:27]
wire d_sel_99 = _d_sel_T_2[99]; // @[OneHot.scala:65:27]
wire d_sel_100 = _d_sel_T_2[100]; // @[OneHot.scala:65:27]
wire d_sel_101 = _d_sel_T_2[101]; // @[OneHot.scala:65:27]
wire d_sel_102 = _d_sel_T_2[102]; // @[OneHot.scala:65:27]
wire d_sel_103 = _d_sel_T_2[103]; // @[OneHot.scala:65:27]
wire d_sel_104 = _d_sel_T_2[104]; // @[OneHot.scala:65:27]
wire d_sel_105 = _d_sel_T_2[105]; // @[OneHot.scala:65:27]
wire d_sel_106 = _d_sel_T_2[106]; // @[OneHot.scala:65:27]
wire d_sel_107 = _d_sel_T_2[107]; // @[OneHot.scala:65:27]
wire d_sel_108 = _d_sel_T_2[108]; // @[OneHot.scala:65:27]
wire d_sel_109 = _d_sel_T_2[109]; // @[OneHot.scala:65:27]
wire d_sel_110 = _d_sel_T_2[110]; // @[OneHot.scala:65:27]
wire d_sel_111 = _d_sel_T_2[111]; // @[OneHot.scala:65:27]
wire d_sel_112 = _d_sel_T_2[112]; // @[OneHot.scala:65:27]
wire d_sel_113 = _d_sel_T_2[113]; // @[OneHot.scala:65:27]
wire d_sel_114 = _d_sel_T_2[114]; // @[OneHot.scala:65:27]
wire d_sel_115 = _d_sel_T_2[115]; // @[OneHot.scala:65:27]
wire d_sel_116 = _d_sel_T_2[116]; // @[OneHot.scala:65:27]
wire d_sel_117 = _d_sel_T_2[117]; // @[OneHot.scala:65:27]
wire d_sel_118 = _d_sel_T_2[118]; // @[OneHot.scala:65:27]
wire d_sel_119 = _d_sel_T_2[119]; // @[OneHot.scala:65:27]
wire d_sel_120 = _d_sel_T_2[120]; // @[OneHot.scala:65:27]
wire d_sel_121 = _d_sel_T_2[121]; // @[OneHot.scala:65:27]
wire d_sel_122 = _d_sel_T_2[122]; // @[OneHot.scala:65:27]
wire d_sel_123 = _d_sel_T_2[123]; // @[OneHot.scala:65:27]
wire d_sel_124 = _d_sel_T_2[124]; // @[OneHot.scala:65:27]
wire d_sel_125 = _d_sel_T_2[125]; // @[OneHot.scala:65:27]
wire d_sel_126 = _d_sel_T_2[126]; // @[OneHot.scala:65:27]
wire d_sel_127 = _d_sel_T_2[127]; // @[OneHot.scala:65:27]
wire d_sel_128 = _d_sel_T_2[128]; // @[OneHot.scala:65:27]
wire d_sel_129 = _d_sel_T_2[129]; // @[OneHot.scala:65:27]
wire d_sel_130 = _d_sel_T_2[130]; // @[OneHot.scala:65:27]
wire d_sel_131 = _d_sel_T_2[131]; // @[OneHot.scala:65:27]
wire d_sel_132 = _d_sel_T_2[132]; // @[OneHot.scala:65:27]
wire d_sel_133 = _d_sel_T_2[133]; // @[OneHot.scala:65:27]
wire d_sel_134 = _d_sel_T_2[134]; // @[OneHot.scala:65:27]
wire d_sel_135 = _d_sel_T_2[135]; // @[OneHot.scala:65:27]
wire d_sel_136 = _d_sel_T_2[136]; // @[OneHot.scala:65:27]
wire d_sel_137 = _d_sel_T_2[137]; // @[OneHot.scala:65:27]
wire d_sel_138 = _d_sel_T_2[138]; // @[OneHot.scala:65:27]
wire d_sel_139 = _d_sel_T_2[139]; // @[OneHot.scala:65:27]
wire d_sel_140 = _d_sel_T_2[140]; // @[OneHot.scala:65:27]
wire d_sel_141 = _d_sel_T_2[141]; // @[OneHot.scala:65:27]
wire d_sel_142 = _d_sel_T_2[142]; // @[OneHot.scala:65:27]
wire d_sel_143 = _d_sel_T_2[143]; // @[OneHot.scala:65:27]
wire d_sel_144 = _d_sel_T_2[144]; // @[OneHot.scala:65:27]
wire d_sel_145 = _d_sel_T_2[145]; // @[OneHot.scala:65:27]
wire d_sel_146 = _d_sel_T_2[146]; // @[OneHot.scala:65:27]
wire d_sel_147 = _d_sel_T_2[147]; // @[OneHot.scala:65:27]
wire d_sel_148 = _d_sel_T_2[148]; // @[OneHot.scala:65:27]
wire d_sel_149 = _d_sel_T_2[149]; // @[OneHot.scala:65:27]
wire d_sel_150 = _d_sel_T_2[150]; // @[OneHot.scala:65:27]
wire d_sel_151 = _d_sel_T_2[151]; // @[OneHot.scala:65:27]
wire d_sel_152 = _d_sel_T_2[152]; // @[OneHot.scala:65:27]
wire d_sel_153 = _d_sel_T_2[153]; // @[OneHot.scala:65:27]
wire d_sel_154 = _d_sel_T_2[154]; // @[OneHot.scala:65:27]
wire d_sel_155 = _d_sel_T_2[155]; // @[OneHot.scala:65:27]
wire d_sel_156 = _d_sel_T_2[156]; // @[OneHot.scala:65:27]
wire d_sel_157 = _d_sel_T_2[157]; // @[OneHot.scala:65:27]
wire d_sel_158 = _d_sel_T_2[158]; // @[OneHot.scala:65:27]
wire d_sel_159 = _d_sel_T_2[159]; // @[OneHot.scala:65:27]
wire d_sel_160 = _d_sel_T_2[160]; // @[OneHot.scala:65:27]
wire d_sel_161 = _d_sel_T_2[161]; // @[OneHot.scala:65:27]
wire d_sel_162 = _d_sel_T_2[162]; // @[OneHot.scala:65:27]
wire d_sel_163 = _d_sel_T_2[163]; // @[OneHot.scala:65:27]
wire d_sel_164 = _d_sel_T_2[164]; // @[OneHot.scala:65:27]
wire d_sel_165 = _d_sel_T_2[165]; // @[OneHot.scala:65:27]
wire d_sel_166 = _d_sel_T_2[166]; // @[OneHot.scala:65:27]
wire d_sel_167 = _d_sel_T_2[167]; // @[OneHot.scala:65:27]
wire d_sel_168 = _d_sel_T_2[168]; // @[OneHot.scala:65:27]
wire d_sel_169 = _d_sel_T_2[169]; // @[OneHot.scala:65:27]
wire d_sel_170 = _d_sel_T_2[170]; // @[OneHot.scala:65:27]
wire d_sel_171 = _d_sel_T_2[171]; // @[OneHot.scala:65:27]
wire d_sel_172 = _d_sel_T_2[172]; // @[OneHot.scala:65:27]
wire d_sel_173 = _d_sel_T_2[173]; // @[OneHot.scala:65:27]
wire d_sel_174 = _d_sel_T_2[174]; // @[OneHot.scala:65:27]
wire d_sel_175 = _d_sel_T_2[175]; // @[OneHot.scala:65:27]
wire d_sel_176 = _d_sel_T_2[176]; // @[OneHot.scala:65:27]
wire d_sel_177 = _d_sel_T_2[177]; // @[OneHot.scala:65:27]
wire d_sel_178 = _d_sel_T_2[178]; // @[OneHot.scala:65:27]
wire d_sel_179 = _d_sel_T_2[179]; // @[OneHot.scala:65:27]
wire d_sel_180 = _d_sel_T_2[180]; // @[OneHot.scala:65:27]
wire d_sel_181 = _d_sel_T_2[181]; // @[OneHot.scala:65:27]
wire d_sel_182 = _d_sel_T_2[182]; // @[OneHot.scala:65:27]
wire d_sel_183 = _d_sel_T_2[183]; // @[OneHot.scala:65:27]
wire d_sel_184 = _d_sel_T_2[184]; // @[OneHot.scala:65:27]
wire d_sel_185 = _d_sel_T_2[185]; // @[OneHot.scala:65:27]
wire d_sel_186 = _d_sel_T_2[186]; // @[OneHot.scala:65:27]
wire d_sel_187 = _d_sel_T_2[187]; // @[OneHot.scala:65:27]
wire d_sel_188 = _d_sel_T_2[188]; // @[OneHot.scala:65:27]
wire d_sel_189 = _d_sel_T_2[189]; // @[OneHot.scala:65:27]
wire d_sel_190 = _d_sel_T_2[190]; // @[OneHot.scala:65:27]
wire d_sel_191 = _d_sel_T_2[191]; // @[OneHot.scala:65:27]
wire d_sel_192 = _d_sel_T_2[192]; // @[OneHot.scala:65:27]
wire d_sel_193 = _d_sel_T_2[193]; // @[OneHot.scala:65:27]
wire d_sel_194 = _d_sel_T_2[194]; // @[OneHot.scala:65:27]
wire d_sel_195 = _d_sel_T_2[195]; // @[OneHot.scala:65:27]
wire d_sel_196 = _d_sel_T_2[196]; // @[OneHot.scala:65:27]
wire d_sel_197 = _d_sel_T_2[197]; // @[OneHot.scala:65:27]
wire d_sel_198 = _d_sel_T_2[198]; // @[OneHot.scala:65:27]
wire d_sel_199 = _d_sel_T_2[199]; // @[OneHot.scala:65:27]
wire d_sel_200 = _d_sel_T_2[200]; // @[OneHot.scala:65:27]
wire d_sel_201 = _d_sel_T_2[201]; // @[OneHot.scala:65:27]
wire d_sel_202 = _d_sel_T_2[202]; // @[OneHot.scala:65:27]
wire d_sel_203 = _d_sel_T_2[203]; // @[OneHot.scala:65:27]
wire d_sel_204 = _d_sel_T_2[204]; // @[OneHot.scala:65:27]
wire d_sel_205 = _d_sel_T_2[205]; // @[OneHot.scala:65:27]
wire d_sel_206 = _d_sel_T_2[206]; // @[OneHot.scala:65:27]
wire d_sel_207 = _d_sel_T_2[207]; // @[OneHot.scala:65:27]
wire d_sel_208 = _d_sel_T_2[208]; // @[OneHot.scala:65:27]
wire d_sel_209 = _d_sel_T_2[209]; // @[OneHot.scala:65:27]
wire d_sel_210 = _d_sel_T_2[210]; // @[OneHot.scala:65:27]
wire d_sel_211 = _d_sel_T_2[211]; // @[OneHot.scala:65:27]
wire d_sel_212 = _d_sel_T_2[212]; // @[OneHot.scala:65:27]
wire d_sel_213 = _d_sel_T_2[213]; // @[OneHot.scala:65:27]
wire d_sel_214 = _d_sel_T_2[214]; // @[OneHot.scala:65:27]
wire d_sel_215 = _d_sel_T_2[215]; // @[OneHot.scala:65:27]
wire d_sel_216 = _d_sel_T_2[216]; // @[OneHot.scala:65:27]
wire d_sel_217 = _d_sel_T_2[217]; // @[OneHot.scala:65:27]
wire d_sel_218 = _d_sel_T_2[218]; // @[OneHot.scala:65:27]
wire d_sel_219 = _d_sel_T_2[219]; // @[OneHot.scala:65:27]
wire d_sel_220 = _d_sel_T_2[220]; // @[OneHot.scala:65:27]
wire d_sel_221 = _d_sel_T_2[221]; // @[OneHot.scala:65:27]
wire d_sel_222 = _d_sel_T_2[222]; // @[OneHot.scala:65:27]
wire d_sel_223 = _d_sel_T_2[223]; // @[OneHot.scala:65:27]
wire d_sel_224 = _d_sel_T_2[224]; // @[OneHot.scala:65:27]
wire d_sel_225 = _d_sel_T_2[225]; // @[OneHot.scala:65:27]
wire d_sel_226 = _d_sel_T_2[226]; // @[OneHot.scala:65:27]
wire d_sel_227 = _d_sel_T_2[227]; // @[OneHot.scala:65:27]
wire d_sel_228 = _d_sel_T_2[228]; // @[OneHot.scala:65:27]
wire d_sel_229 = _d_sel_T_2[229]; // @[OneHot.scala:65:27]
wire d_sel_230 = _d_sel_T_2[230]; // @[OneHot.scala:65:27]
wire d_sel_231 = _d_sel_T_2[231]; // @[OneHot.scala:65:27]
wire d_sel_232 = _d_sel_T_2[232]; // @[OneHot.scala:65:27]
wire d_sel_233 = _d_sel_T_2[233]; // @[OneHot.scala:65:27]
wire d_sel_234 = _d_sel_T_2[234]; // @[OneHot.scala:65:27]
wire d_sel_235 = _d_sel_T_2[235]; // @[OneHot.scala:65:27]
wire d_sel_236 = _d_sel_T_2[236]; // @[OneHot.scala:65:27]
wire d_sel_237 = _d_sel_T_2[237]; // @[OneHot.scala:65:27]
wire d_sel_238 = _d_sel_T_2[238]; // @[OneHot.scala:65:27]
wire d_sel_239 = _d_sel_T_2[239]; // @[OneHot.scala:65:27]
wire d_sel_240 = _d_sel_T_2[240]; // @[OneHot.scala:65:27]
wire d_sel_241 = _d_sel_T_2[241]; // @[OneHot.scala:65:27]
wire d_sel_242 = _d_sel_T_2[242]; // @[OneHot.scala:65:27]
wire d_sel_243 = _d_sel_T_2[243]; // @[OneHot.scala:65:27]
wire d_last = ~r_wins | nodeOut_r_bits_last; // @[ToAXI4.scala:225:53, :262:23]
reg count; // @[ToAXI4.scala:272:28]
wire _idStall_0_T_2 = count; // @[ToAXI4.scala:272:28, :286:44]
reg write; // @[ToAXI4.scala:273:24]
wire idle = ~count; // @[ToAXI4.scala:272:28, :274:26]
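  // Outstanding-request state for ID 0 (count/write/idle); analogous registers for the other IDs
  // appear elsewhere in the module. The _inc_T_* wires below are all the same AR/AW handshake
  // (out_arw fire), fanned out once per ID to update those counters.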
wire _GEN_2 = out_arw_ready & out_arw_valid; // @[Decoupled.scala:51:35]
wire _inc_T; // @[Decoupled.scala:51:35]
assign _inc_T = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_1; // @[Decoupled.scala:51:35]
assign _inc_T_1 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_2; // @[Decoupled.scala:51:35]
assign _inc_T_2 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_3; // @[Decoupled.scala:51:35]
assign _inc_T_3 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_4; // @[Decoupled.scala:51:35]
assign _inc_T_4 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_5; // @[Decoupled.scala:51:35]
assign _inc_T_5 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_6; // @[Decoupled.scala:51:35]
assign _inc_T_6 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_7; // @[Decoupled.scala:51:35]
assign _inc_T_7 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_8; // @[Decoupled.scala:51:35]
assign _inc_T_8 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_9; // @[Decoupled.scala:51:35]
assign _inc_T_9 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_10; // @[Decoupled.scala:51:35]
assign _inc_T_10 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_11; // @[Decoupled.scala:51:35]
assign _inc_T_11 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_12; // @[Decoupled.scala:51:35]
assign _inc_T_12 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_13; // @[Decoupled.scala:51:35]
assign _inc_T_13 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_14; // @[Decoupled.scala:51:35]
assign _inc_T_14 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_15; // @[Decoupled.scala:51:35]
assign _inc_T_15 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_16; // @[Decoupled.scala:51:35]
assign _inc_T_16 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_17; // @[Decoupled.scala:51:35]
assign _inc_T_17 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_18; // @[Decoupled.scala:51:35]
assign _inc_T_18 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_19; // @[Decoupled.scala:51:35]
assign _inc_T_19 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_20; // @[Decoupled.scala:51:35]
assign _inc_T_20 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_21; // @[Decoupled.scala:51:35]
assign _inc_T_21 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_22; // @[Decoupled.scala:51:35]
assign _inc_T_22 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_23; // @[Decoupled.scala:51:35]
assign _inc_T_23 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_24; // @[Decoupled.scala:51:35]
assign _inc_T_24 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_25; // @[Decoupled.scala:51:35]
assign _inc_T_25 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_26; // @[Decoupled.scala:51:35]
assign _inc_T_26 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_27; // @[Decoupled.scala:51:35]
assign _inc_T_27 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_28; // @[Decoupled.scala:51:35]
assign _inc_T_28 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_29; // @[Decoupled.scala:51:35]
assign _inc_T_29 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_30; // @[Decoupled.scala:51:35]
assign _inc_T_30 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_31; // @[Decoupled.scala:51:35]
assign _inc_T_31 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_32; // @[Decoupled.scala:51:35]
assign _inc_T_32 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_33; // @[Decoupled.scala:51:35]
assign _inc_T_33 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_34; // @[Decoupled.scala:51:35]
assign _inc_T_34 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_35; // @[Decoupled.scala:51:35]
assign _inc_T_35 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_36; // @[Decoupled.scala:51:35]
assign _inc_T_36 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_37; // @[Decoupled.scala:51:35]
assign _inc_T_37 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_38; // @[Decoupled.scala:51:35]
assign _inc_T_38 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_39; // @[Decoupled.scala:51:35]
assign _inc_T_39 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_40; // @[Decoupled.scala:51:35]
assign _inc_T_40 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_41; // @[Decoupled.scala:51:35]
assign _inc_T_41 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_42; // @[Decoupled.scala:51:35]
assign _inc_T_42 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_43; // @[Decoupled.scala:51:35]
assign _inc_T_43 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_44; // @[Decoupled.scala:51:35]
assign _inc_T_44 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_45; // @[Decoupled.scala:51:35]
assign _inc_T_45 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_46; // @[Decoupled.scala:51:35]
assign _inc_T_46 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_47; // @[Decoupled.scala:51:35]
assign _inc_T_47 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_48; // @[Decoupled.scala:51:35]
assign _inc_T_48 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_49; // @[Decoupled.scala:51:35]
assign _inc_T_49 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_50; // @[Decoupled.scala:51:35]
assign _inc_T_50 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_51; // @[Decoupled.scala:51:35]
assign _inc_T_51 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_52; // @[Decoupled.scala:51:35]
assign _inc_T_52 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_53; // @[Decoupled.scala:51:35]
assign _inc_T_53 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_54; // @[Decoupled.scala:51:35]
assign _inc_T_54 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_55; // @[Decoupled.scala:51:35]
assign _inc_T_55 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_56; // @[Decoupled.scala:51:35]
assign _inc_T_56 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_57; // @[Decoupled.scala:51:35]
assign _inc_T_57 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_58; // @[Decoupled.scala:51:35]
assign _inc_T_58 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_59; // @[Decoupled.scala:51:35]
assign _inc_T_59 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_60; // @[Decoupled.scala:51:35]
assign _inc_T_60 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_61; // @[Decoupled.scala:51:35]
assign _inc_T_61 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_62; // @[Decoupled.scala:51:35]
assign _inc_T_62 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_63; // @[Decoupled.scala:51:35]
assign _inc_T_63 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_64; // @[Decoupled.scala:51:35]
assign _inc_T_64 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_65; // @[Decoupled.scala:51:35]
assign _inc_T_65 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_66; // @[Decoupled.scala:51:35]
assign _inc_T_66 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_67; // @[Decoupled.scala:51:35]
assign _inc_T_67 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_68; // @[Decoupled.scala:51:35]
assign _inc_T_68 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_69; // @[Decoupled.scala:51:35]
assign _inc_T_69 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_70; // @[Decoupled.scala:51:35]
assign _inc_T_70 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_71; // @[Decoupled.scala:51:35]
assign _inc_T_71 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_72; // @[Decoupled.scala:51:35]
assign _inc_T_72 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_73; // @[Decoupled.scala:51:35]
assign _inc_T_73 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_74; // @[Decoupled.scala:51:35]
assign _inc_T_74 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_75; // @[Decoupled.scala:51:35]
assign _inc_T_75 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_76; // @[Decoupled.scala:51:35]
assign _inc_T_76 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_77; // @[Decoupled.scala:51:35]
assign _inc_T_77 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_78; // @[Decoupled.scala:51:35]
assign _inc_T_78 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_79; // @[Decoupled.scala:51:35]
assign _inc_T_79 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_80; // @[Decoupled.scala:51:35]
assign _inc_T_80 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_81; // @[Decoupled.scala:51:35]
assign _inc_T_81 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_82; // @[Decoupled.scala:51:35]
assign _inc_T_82 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_83; // @[Decoupled.scala:51:35]
assign _inc_T_83 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_84; // @[Decoupled.scala:51:35]
assign _inc_T_84 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_85; // @[Decoupled.scala:51:35]
assign _inc_T_85 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_86; // @[Decoupled.scala:51:35]
assign _inc_T_86 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_87; // @[Decoupled.scala:51:35]
assign _inc_T_87 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_88; // @[Decoupled.scala:51:35]
assign _inc_T_88 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_89; // @[Decoupled.scala:51:35]
assign _inc_T_89 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_90; // @[Decoupled.scala:51:35]
assign _inc_T_90 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_91; // @[Decoupled.scala:51:35]
assign _inc_T_91 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_92; // @[Decoupled.scala:51:35]
assign _inc_T_92 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_93; // @[Decoupled.scala:51:35]
assign _inc_T_93 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_94; // @[Decoupled.scala:51:35]
assign _inc_T_94 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_95; // @[Decoupled.scala:51:35]
assign _inc_T_95 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_96; // @[Decoupled.scala:51:35]
assign _inc_T_96 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_97; // @[Decoupled.scala:51:35]
assign _inc_T_97 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_98; // @[Decoupled.scala:51:35]
assign _inc_T_98 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_99; // @[Decoupled.scala:51:35]
assign _inc_T_99 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_100; // @[Decoupled.scala:51:35]
assign _inc_T_100 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_101; // @[Decoupled.scala:51:35]
assign _inc_T_101 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_102; // @[Decoupled.scala:51:35]
assign _inc_T_102 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_103; // @[Decoupled.scala:51:35]
assign _inc_T_103 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_104; // @[Decoupled.scala:51:35]
assign _inc_T_104 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_105; // @[Decoupled.scala:51:35]
assign _inc_T_105 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_106; // @[Decoupled.scala:51:35]
assign _inc_T_106 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_107; // @[Decoupled.scala:51:35]
assign _inc_T_107 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_108; // @[Decoupled.scala:51:35]
assign _inc_T_108 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_109; // @[Decoupled.scala:51:35]
assign _inc_T_109 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_110; // @[Decoupled.scala:51:35]
assign _inc_T_110 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_111; // @[Decoupled.scala:51:35]
assign _inc_T_111 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_112; // @[Decoupled.scala:51:35]
assign _inc_T_112 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_113; // @[Decoupled.scala:51:35]
assign _inc_T_113 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_114; // @[Decoupled.scala:51:35]
assign _inc_T_114 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_115; // @[Decoupled.scala:51:35]
assign _inc_T_115 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_116; // @[Decoupled.scala:51:35]
assign _inc_T_116 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_117; // @[Decoupled.scala:51:35]
assign _inc_T_117 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_118; // @[Decoupled.scala:51:35]
assign _inc_T_118 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_119; // @[Decoupled.scala:51:35]
assign _inc_T_119 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_120; // @[Decoupled.scala:51:35]
assign _inc_T_120 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_121; // @[Decoupled.scala:51:35]
assign _inc_T_121 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_122; // @[Decoupled.scala:51:35]
assign _inc_T_122 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_123; // @[Decoupled.scala:51:35]
assign _inc_T_123 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_124; // @[Decoupled.scala:51:35]
assign _inc_T_124 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_125; // @[Decoupled.scala:51:35]
assign _inc_T_125 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_126; // @[Decoupled.scala:51:35]
assign _inc_T_126 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_127; // @[Decoupled.scala:51:35]
assign _inc_T_127 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_128; // @[Decoupled.scala:51:35]
assign _inc_T_128 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_129; // @[Decoupled.scala:51:35]
assign _inc_T_129 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_130; // @[Decoupled.scala:51:35]
assign _inc_T_130 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_131; // @[Decoupled.scala:51:35]
assign _inc_T_131 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_132; // @[Decoupled.scala:51:35]
assign _inc_T_132 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_133; // @[Decoupled.scala:51:35]
assign _inc_T_133 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_134; // @[Decoupled.scala:51:35]
assign _inc_T_134 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_135; // @[Decoupled.scala:51:35]
assign _inc_T_135 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_136; // @[Decoupled.scala:51:35]
assign _inc_T_136 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_137; // @[Decoupled.scala:51:35]
assign _inc_T_137 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_138; // @[Decoupled.scala:51:35]
assign _inc_T_138 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_139; // @[Decoupled.scala:51:35]
assign _inc_T_139 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_140; // @[Decoupled.scala:51:35]
assign _inc_T_140 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_141; // @[Decoupled.scala:51:35]
assign _inc_T_141 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_142; // @[Decoupled.scala:51:35]
assign _inc_T_142 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_143; // @[Decoupled.scala:51:35]
assign _inc_T_143 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_144; // @[Decoupled.scala:51:35]
assign _inc_T_144 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_145; // @[Decoupled.scala:51:35]
assign _inc_T_145 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_146; // @[Decoupled.scala:51:35]
assign _inc_T_146 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_147; // @[Decoupled.scala:51:35]
assign _inc_T_147 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_148; // @[Decoupled.scala:51:35]
assign _inc_T_148 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_149; // @[Decoupled.scala:51:35]
assign _inc_T_149 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_150; // @[Decoupled.scala:51:35]
assign _inc_T_150 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_151; // @[Decoupled.scala:51:35]
assign _inc_T_151 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_152; // @[Decoupled.scala:51:35]
assign _inc_T_152 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_153; // @[Decoupled.scala:51:35]
assign _inc_T_153 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_154; // @[Decoupled.scala:51:35]
assign _inc_T_154 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_155; // @[Decoupled.scala:51:35]
assign _inc_T_155 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_156; // @[Decoupled.scala:51:35]
assign _inc_T_156 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_157; // @[Decoupled.scala:51:35]
assign _inc_T_157 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_158; // @[Decoupled.scala:51:35]
assign _inc_T_158 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_159; // @[Decoupled.scala:51:35]
assign _inc_T_159 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_160; // @[Decoupled.scala:51:35]
assign _inc_T_160 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_161; // @[Decoupled.scala:51:35]
assign _inc_T_161 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_162; // @[Decoupled.scala:51:35]
assign _inc_T_162 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_163; // @[Decoupled.scala:51:35]
assign _inc_T_163 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_164; // @[Decoupled.scala:51:35]
assign _inc_T_164 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_165; // @[Decoupled.scala:51:35]
assign _inc_T_165 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_166; // @[Decoupled.scala:51:35]
assign _inc_T_166 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_167; // @[Decoupled.scala:51:35]
assign _inc_T_167 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_168; // @[Decoupled.scala:51:35]
assign _inc_T_168 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_169; // @[Decoupled.scala:51:35]
assign _inc_T_169 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_170; // @[Decoupled.scala:51:35]
assign _inc_T_170 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_171; // @[Decoupled.scala:51:35]
assign _inc_T_171 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_172; // @[Decoupled.scala:51:35]
assign _inc_T_172 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_173; // @[Decoupled.scala:51:35]
assign _inc_T_173 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_174; // @[Decoupled.scala:51:35]
assign _inc_T_174 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_175; // @[Decoupled.scala:51:35]
assign _inc_T_175 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_176; // @[Decoupled.scala:51:35]
assign _inc_T_176 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_177; // @[Decoupled.scala:51:35]
assign _inc_T_177 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_178; // @[Decoupled.scala:51:35]
assign _inc_T_178 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_179; // @[Decoupled.scala:51:35]
assign _inc_T_179 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_180; // @[Decoupled.scala:51:35]
assign _inc_T_180 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_181; // @[Decoupled.scala:51:35]
assign _inc_T_181 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_182; // @[Decoupled.scala:51:35]
assign _inc_T_182 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_183; // @[Decoupled.scala:51:35]
assign _inc_T_183 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_184; // @[Decoupled.scala:51:35]
assign _inc_T_184 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_185; // @[Decoupled.scala:51:35]
assign _inc_T_185 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_186; // @[Decoupled.scala:51:35]
assign _inc_T_186 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_187; // @[Decoupled.scala:51:35]
assign _inc_T_187 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_188; // @[Decoupled.scala:51:35]
assign _inc_T_188 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_189; // @[Decoupled.scala:51:35]
assign _inc_T_189 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_190; // @[Decoupled.scala:51:35]
assign _inc_T_190 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_191; // @[Decoupled.scala:51:35]
assign _inc_T_191 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_192; // @[Decoupled.scala:51:35]
assign _inc_T_192 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_193; // @[Decoupled.scala:51:35]
assign _inc_T_193 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_194; // @[Decoupled.scala:51:35]
assign _inc_T_194 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_195; // @[Decoupled.scala:51:35]
assign _inc_T_195 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_196; // @[Decoupled.scala:51:35]
assign _inc_T_196 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_197; // @[Decoupled.scala:51:35]
assign _inc_T_197 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_198; // @[Decoupled.scala:51:35]
assign _inc_T_198 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_199; // @[Decoupled.scala:51:35]
assign _inc_T_199 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_200; // @[Decoupled.scala:51:35]
assign _inc_T_200 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_201; // @[Decoupled.scala:51:35]
assign _inc_T_201 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_202; // @[Decoupled.scala:51:35]
assign _inc_T_202 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_203; // @[Decoupled.scala:51:35]
assign _inc_T_203 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_204; // @[Decoupled.scala:51:35]
assign _inc_T_204 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_205; // @[Decoupled.scala:51:35]
assign _inc_T_205 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_206; // @[Decoupled.scala:51:35]
assign _inc_T_206 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_207; // @[Decoupled.scala:51:35]
assign _inc_T_207 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_208; // @[Decoupled.scala:51:35]
assign _inc_T_208 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_209; // @[Decoupled.scala:51:35]
assign _inc_T_209 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_210; // @[Decoupled.scala:51:35]
assign _inc_T_210 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_211; // @[Decoupled.scala:51:35]
assign _inc_T_211 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_212; // @[Decoupled.scala:51:35]
assign _inc_T_212 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_213; // @[Decoupled.scala:51:35]
assign _inc_T_213 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_214; // @[Decoupled.scala:51:35]
assign _inc_T_214 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_215; // @[Decoupled.scala:51:35]
assign _inc_T_215 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_216; // @[Decoupled.scala:51:35]
assign _inc_T_216 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_217; // @[Decoupled.scala:51:35]
assign _inc_T_217 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_218; // @[Decoupled.scala:51:35]
assign _inc_T_218 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_219; // @[Decoupled.scala:51:35]
assign _inc_T_219 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_220; // @[Decoupled.scala:51:35]
assign _inc_T_220 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_221; // @[Decoupled.scala:51:35]
assign _inc_T_221 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_222; // @[Decoupled.scala:51:35]
assign _inc_T_222 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_223; // @[Decoupled.scala:51:35]
assign _inc_T_223 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_224; // @[Decoupled.scala:51:35]
assign _inc_T_224 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_225; // @[Decoupled.scala:51:35]
assign _inc_T_225 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_226; // @[Decoupled.scala:51:35]
assign _inc_T_226 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_227; // @[Decoupled.scala:51:35]
assign _inc_T_227 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_228; // @[Decoupled.scala:51:35]
assign _inc_T_228 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_229; // @[Decoupled.scala:51:35]
assign _inc_T_229 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_230; // @[Decoupled.scala:51:35]
assign _inc_T_230 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_231; // @[Decoupled.scala:51:35]
assign _inc_T_231 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_232; // @[Decoupled.scala:51:35]
assign _inc_T_232 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_233; // @[Decoupled.scala:51:35]
assign _inc_T_233 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_234; // @[Decoupled.scala:51:35]
assign _inc_T_234 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_235; // @[Decoupled.scala:51:35]
assign _inc_T_235 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_236; // @[Decoupled.scala:51:35]
assign _inc_T_236 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_237; // @[Decoupled.scala:51:35]
assign _inc_T_237 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_238; // @[Decoupled.scala:51:35]
assign _inc_T_238 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_239; // @[Decoupled.scala:51:35]
assign _inc_T_239 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_240; // @[Decoupled.scala:51:35]
assign _inc_T_240 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_241; // @[Decoupled.scala:51:35]
assign _inc_T_241 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_242; // @[Decoupled.scala:51:35]
assign _inc_T_242 = _GEN_2; // @[Decoupled.scala:51:35]
wire _inc_T_243; // @[Decoupled.scala:51:35]
assign _inc_T_243 = _GEN_2; // @[Decoupled.scala:51:35]
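  // Note: the _inc_T_* wires above appear to be per-ID fan-out copies of a single
  // request-side handshake term (_GEN_2, a ready & valid "fire" per the Decoupled.scala
  // annotation, defined earlier in this module); each copy feeds one per-AXI-ID
  // in-flight counter, starting with ID 0 below.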
wire inc = a_sel_0 & _inc_T; // @[Decoupled.scala:51:35]
wire _dec_T = d_sel_0 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire _GEN_3 = nodeIn_d_ready & nodeIn_d_valid; // @[Decoupled.scala:51:35]
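  // _GEN_3 is the TileLink D-channel handshake (ready & valid). The _dec_T_* wires that
  // follow appear to be fan-out copies of it, one per AXI ID, each combined with the
  // matching d_sel_* and d_last terms to decrement that ID's in-flight counter when the
  // final response beat completes.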
wire _dec_T_1; // @[Decoupled.scala:51:35]
assign _dec_T_1 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_3; // @[Decoupled.scala:51:35]
assign _dec_T_3 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_5; // @[Decoupled.scala:51:35]
assign _dec_T_5 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_7; // @[Decoupled.scala:51:35]
assign _dec_T_7 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_9; // @[Decoupled.scala:51:35]
assign _dec_T_9 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_11; // @[Decoupled.scala:51:35]
assign _dec_T_11 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_13; // @[Decoupled.scala:51:35]
assign _dec_T_13 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_15; // @[Decoupled.scala:51:35]
assign _dec_T_15 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_17; // @[Decoupled.scala:51:35]
assign _dec_T_17 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_19; // @[Decoupled.scala:51:35]
assign _dec_T_19 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_21; // @[Decoupled.scala:51:35]
assign _dec_T_21 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_23; // @[Decoupled.scala:51:35]
assign _dec_T_23 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_25; // @[Decoupled.scala:51:35]
assign _dec_T_25 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_27; // @[Decoupled.scala:51:35]
assign _dec_T_27 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_29; // @[Decoupled.scala:51:35]
assign _dec_T_29 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_31; // @[Decoupled.scala:51:35]
assign _dec_T_31 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_33; // @[Decoupled.scala:51:35]
assign _dec_T_33 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_35; // @[Decoupled.scala:51:35]
assign _dec_T_35 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_37; // @[Decoupled.scala:51:35]
assign _dec_T_37 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_39; // @[Decoupled.scala:51:35]
assign _dec_T_39 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_41; // @[Decoupled.scala:51:35]
assign _dec_T_41 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_43; // @[Decoupled.scala:51:35]
assign _dec_T_43 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_45; // @[Decoupled.scala:51:35]
assign _dec_T_45 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_47; // @[Decoupled.scala:51:35]
assign _dec_T_47 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_49; // @[Decoupled.scala:51:35]
assign _dec_T_49 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_51; // @[Decoupled.scala:51:35]
assign _dec_T_51 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_53; // @[Decoupled.scala:51:35]
assign _dec_T_53 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_55; // @[Decoupled.scala:51:35]
assign _dec_T_55 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_57; // @[Decoupled.scala:51:35]
assign _dec_T_57 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_59; // @[Decoupled.scala:51:35]
assign _dec_T_59 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_61; // @[Decoupled.scala:51:35]
assign _dec_T_61 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_63; // @[Decoupled.scala:51:35]
assign _dec_T_63 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_65; // @[Decoupled.scala:51:35]
assign _dec_T_65 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_67; // @[Decoupled.scala:51:35]
assign _dec_T_67 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_69; // @[Decoupled.scala:51:35]
assign _dec_T_69 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_71; // @[Decoupled.scala:51:35]
assign _dec_T_71 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_73; // @[Decoupled.scala:51:35]
assign _dec_T_73 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_75; // @[Decoupled.scala:51:35]
assign _dec_T_75 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_77; // @[Decoupled.scala:51:35]
assign _dec_T_77 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_79; // @[Decoupled.scala:51:35]
assign _dec_T_79 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_81; // @[Decoupled.scala:51:35]
assign _dec_T_81 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_83; // @[Decoupled.scala:51:35]
assign _dec_T_83 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_85; // @[Decoupled.scala:51:35]
assign _dec_T_85 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_87; // @[Decoupled.scala:51:35]
assign _dec_T_87 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_89; // @[Decoupled.scala:51:35]
assign _dec_T_89 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_91; // @[Decoupled.scala:51:35]
assign _dec_T_91 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_93; // @[Decoupled.scala:51:35]
assign _dec_T_93 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_95; // @[Decoupled.scala:51:35]
assign _dec_T_95 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_97; // @[Decoupled.scala:51:35]
assign _dec_T_97 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_99; // @[Decoupled.scala:51:35]
assign _dec_T_99 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_101; // @[Decoupled.scala:51:35]
assign _dec_T_101 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_103; // @[Decoupled.scala:51:35]
assign _dec_T_103 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_105; // @[Decoupled.scala:51:35]
assign _dec_T_105 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_107; // @[Decoupled.scala:51:35]
assign _dec_T_107 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_109; // @[Decoupled.scala:51:35]
assign _dec_T_109 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_111; // @[Decoupled.scala:51:35]
assign _dec_T_111 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_113; // @[Decoupled.scala:51:35]
assign _dec_T_113 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_115; // @[Decoupled.scala:51:35]
assign _dec_T_115 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_117; // @[Decoupled.scala:51:35]
assign _dec_T_117 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_119; // @[Decoupled.scala:51:35]
assign _dec_T_119 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_121; // @[Decoupled.scala:51:35]
assign _dec_T_121 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_123; // @[Decoupled.scala:51:35]
assign _dec_T_123 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_125; // @[Decoupled.scala:51:35]
assign _dec_T_125 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_127; // @[Decoupled.scala:51:35]
assign _dec_T_127 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_129; // @[Decoupled.scala:51:35]
assign _dec_T_129 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_131; // @[Decoupled.scala:51:35]
assign _dec_T_131 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_133; // @[Decoupled.scala:51:35]
assign _dec_T_133 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_135; // @[Decoupled.scala:51:35]
assign _dec_T_135 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_137; // @[Decoupled.scala:51:35]
assign _dec_T_137 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_139; // @[Decoupled.scala:51:35]
assign _dec_T_139 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_141; // @[Decoupled.scala:51:35]
assign _dec_T_141 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_143; // @[Decoupled.scala:51:35]
assign _dec_T_143 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_145; // @[Decoupled.scala:51:35]
assign _dec_T_145 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_147; // @[Decoupled.scala:51:35]
assign _dec_T_147 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_149; // @[Decoupled.scala:51:35]
assign _dec_T_149 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_151; // @[Decoupled.scala:51:35]
assign _dec_T_151 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_153; // @[Decoupled.scala:51:35]
assign _dec_T_153 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_155; // @[Decoupled.scala:51:35]
assign _dec_T_155 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_157; // @[Decoupled.scala:51:35]
assign _dec_T_157 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_159; // @[Decoupled.scala:51:35]
assign _dec_T_159 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_161; // @[Decoupled.scala:51:35]
assign _dec_T_161 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_163; // @[Decoupled.scala:51:35]
assign _dec_T_163 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_165; // @[Decoupled.scala:51:35]
assign _dec_T_165 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_167; // @[Decoupled.scala:51:35]
assign _dec_T_167 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_169; // @[Decoupled.scala:51:35]
assign _dec_T_169 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_171; // @[Decoupled.scala:51:35]
assign _dec_T_171 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_173; // @[Decoupled.scala:51:35]
assign _dec_T_173 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_175; // @[Decoupled.scala:51:35]
assign _dec_T_175 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_177; // @[Decoupled.scala:51:35]
assign _dec_T_177 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_179; // @[Decoupled.scala:51:35]
assign _dec_T_179 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_181; // @[Decoupled.scala:51:35]
assign _dec_T_181 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_183; // @[Decoupled.scala:51:35]
assign _dec_T_183 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_185; // @[Decoupled.scala:51:35]
assign _dec_T_185 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_187; // @[Decoupled.scala:51:35]
assign _dec_T_187 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_189; // @[Decoupled.scala:51:35]
assign _dec_T_189 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_191; // @[Decoupled.scala:51:35]
assign _dec_T_191 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_193; // @[Decoupled.scala:51:35]
assign _dec_T_193 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_195; // @[Decoupled.scala:51:35]
assign _dec_T_195 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_197; // @[Decoupled.scala:51:35]
assign _dec_T_197 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_199; // @[Decoupled.scala:51:35]
assign _dec_T_199 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_201; // @[Decoupled.scala:51:35]
assign _dec_T_201 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_203; // @[Decoupled.scala:51:35]
assign _dec_T_203 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_205; // @[Decoupled.scala:51:35]
assign _dec_T_205 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_207; // @[Decoupled.scala:51:35]
assign _dec_T_207 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_209; // @[Decoupled.scala:51:35]
assign _dec_T_209 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_211; // @[Decoupled.scala:51:35]
assign _dec_T_211 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_213; // @[Decoupled.scala:51:35]
assign _dec_T_213 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_215; // @[Decoupled.scala:51:35]
assign _dec_T_215 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_217; // @[Decoupled.scala:51:35]
assign _dec_T_217 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_219; // @[Decoupled.scala:51:35]
assign _dec_T_219 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_221; // @[Decoupled.scala:51:35]
assign _dec_T_221 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_223; // @[Decoupled.scala:51:35]
assign _dec_T_223 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_225; // @[Decoupled.scala:51:35]
assign _dec_T_225 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_227; // @[Decoupled.scala:51:35]
assign _dec_T_227 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_229; // @[Decoupled.scala:51:35]
assign _dec_T_229 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_231; // @[Decoupled.scala:51:35]
assign _dec_T_231 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_233; // @[Decoupled.scala:51:35]
assign _dec_T_233 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_235; // @[Decoupled.scala:51:35]
assign _dec_T_235 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_237; // @[Decoupled.scala:51:35]
assign _dec_T_237 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_239; // @[Decoupled.scala:51:35]
assign _dec_T_239 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_241; // @[Decoupled.scala:51:35]
assign _dec_T_241 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_243; // @[Decoupled.scala:51:35]
assign _dec_T_243 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_245; // @[Decoupled.scala:51:35]
assign _dec_T_245 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_247; // @[Decoupled.scala:51:35]
assign _dec_T_247 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_249; // @[Decoupled.scala:51:35]
assign _dec_T_249 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_251; // @[Decoupled.scala:51:35]
assign _dec_T_251 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_253; // @[Decoupled.scala:51:35]
assign _dec_T_253 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_255; // @[Decoupled.scala:51:35]
assign _dec_T_255 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_257; // @[Decoupled.scala:51:35]
assign _dec_T_257 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_259; // @[Decoupled.scala:51:35]
assign _dec_T_259 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_261; // @[Decoupled.scala:51:35]
assign _dec_T_261 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_263; // @[Decoupled.scala:51:35]
assign _dec_T_263 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_265; // @[Decoupled.scala:51:35]
assign _dec_T_265 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_267; // @[Decoupled.scala:51:35]
assign _dec_T_267 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_269; // @[Decoupled.scala:51:35]
assign _dec_T_269 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_271; // @[Decoupled.scala:51:35]
assign _dec_T_271 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_273; // @[Decoupled.scala:51:35]
assign _dec_T_273 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_275; // @[Decoupled.scala:51:35]
assign _dec_T_275 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_277; // @[Decoupled.scala:51:35]
assign _dec_T_277 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_279; // @[Decoupled.scala:51:35]
assign _dec_T_279 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_281; // @[Decoupled.scala:51:35]
assign _dec_T_281 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_283; // @[Decoupled.scala:51:35]
assign _dec_T_283 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_285; // @[Decoupled.scala:51:35]
assign _dec_T_285 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_287; // @[Decoupled.scala:51:35]
assign _dec_T_287 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_289; // @[Decoupled.scala:51:35]
assign _dec_T_289 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_291; // @[Decoupled.scala:51:35]
assign _dec_T_291 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_293; // @[Decoupled.scala:51:35]
assign _dec_T_293 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_295; // @[Decoupled.scala:51:35]
assign _dec_T_295 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_297; // @[Decoupled.scala:51:35]
assign _dec_T_297 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_299; // @[Decoupled.scala:51:35]
assign _dec_T_299 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_301; // @[Decoupled.scala:51:35]
assign _dec_T_301 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_303; // @[Decoupled.scala:51:35]
assign _dec_T_303 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_305; // @[Decoupled.scala:51:35]
assign _dec_T_305 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_307; // @[Decoupled.scala:51:35]
assign _dec_T_307 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_309; // @[Decoupled.scala:51:35]
assign _dec_T_309 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_311; // @[Decoupled.scala:51:35]
assign _dec_T_311 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_313; // @[Decoupled.scala:51:35]
assign _dec_T_313 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_315; // @[Decoupled.scala:51:35]
assign _dec_T_315 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_317; // @[Decoupled.scala:51:35]
assign _dec_T_317 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_319; // @[Decoupled.scala:51:35]
assign _dec_T_319 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_321; // @[Decoupled.scala:51:35]
assign _dec_T_321 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_323; // @[Decoupled.scala:51:35]
assign _dec_T_323 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_325; // @[Decoupled.scala:51:35]
assign _dec_T_325 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_327; // @[Decoupled.scala:51:35]
assign _dec_T_327 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_329; // @[Decoupled.scala:51:35]
assign _dec_T_329 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_331; // @[Decoupled.scala:51:35]
assign _dec_T_331 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_333; // @[Decoupled.scala:51:35]
assign _dec_T_333 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_335; // @[Decoupled.scala:51:35]
assign _dec_T_335 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_337; // @[Decoupled.scala:51:35]
assign _dec_T_337 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_339; // @[Decoupled.scala:51:35]
assign _dec_T_339 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_341; // @[Decoupled.scala:51:35]
assign _dec_T_341 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_343; // @[Decoupled.scala:51:35]
assign _dec_T_343 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_345; // @[Decoupled.scala:51:35]
assign _dec_T_345 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_347; // @[Decoupled.scala:51:35]
assign _dec_T_347 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_349; // @[Decoupled.scala:51:35]
assign _dec_T_349 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_351; // @[Decoupled.scala:51:35]
assign _dec_T_351 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_353; // @[Decoupled.scala:51:35]
assign _dec_T_353 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_355; // @[Decoupled.scala:51:35]
assign _dec_T_355 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_357; // @[Decoupled.scala:51:35]
assign _dec_T_357 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_359; // @[Decoupled.scala:51:35]
assign _dec_T_359 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_361; // @[Decoupled.scala:51:35]
assign _dec_T_361 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_363; // @[Decoupled.scala:51:35]
assign _dec_T_363 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_365; // @[Decoupled.scala:51:35]
assign _dec_T_365 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_367; // @[Decoupled.scala:51:35]
assign _dec_T_367 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_369; // @[Decoupled.scala:51:35]
assign _dec_T_369 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_371; // @[Decoupled.scala:51:35]
assign _dec_T_371 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_373; // @[Decoupled.scala:51:35]
assign _dec_T_373 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_375; // @[Decoupled.scala:51:35]
assign _dec_T_375 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_377; // @[Decoupled.scala:51:35]
assign _dec_T_377 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_379; // @[Decoupled.scala:51:35]
assign _dec_T_379 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_381; // @[Decoupled.scala:51:35]
assign _dec_T_381 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_383; // @[Decoupled.scala:51:35]
assign _dec_T_383 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_385; // @[Decoupled.scala:51:35]
assign _dec_T_385 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_387; // @[Decoupled.scala:51:35]
assign _dec_T_387 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_389; // @[Decoupled.scala:51:35]
assign _dec_T_389 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_391; // @[Decoupled.scala:51:35]
assign _dec_T_391 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_393; // @[Decoupled.scala:51:35]
assign _dec_T_393 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_395; // @[Decoupled.scala:51:35]
assign _dec_T_395 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_397; // @[Decoupled.scala:51:35]
assign _dec_T_397 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_399; // @[Decoupled.scala:51:35]
assign _dec_T_399 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_401; // @[Decoupled.scala:51:35]
assign _dec_T_401 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_403; // @[Decoupled.scala:51:35]
assign _dec_T_403 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_405; // @[Decoupled.scala:51:35]
assign _dec_T_405 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_407; // @[Decoupled.scala:51:35]
assign _dec_T_407 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_409; // @[Decoupled.scala:51:35]
assign _dec_T_409 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_411; // @[Decoupled.scala:51:35]
assign _dec_T_411 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_413; // @[Decoupled.scala:51:35]
assign _dec_T_413 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_415; // @[Decoupled.scala:51:35]
assign _dec_T_415 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_417; // @[Decoupled.scala:51:35]
assign _dec_T_417 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_419; // @[Decoupled.scala:51:35]
assign _dec_T_419 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_421; // @[Decoupled.scala:51:35]
assign _dec_T_421 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_423; // @[Decoupled.scala:51:35]
assign _dec_T_423 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_425; // @[Decoupled.scala:51:35]
assign _dec_T_425 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_427; // @[Decoupled.scala:51:35]
assign _dec_T_427 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_429; // @[Decoupled.scala:51:35]
assign _dec_T_429 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_431; // @[Decoupled.scala:51:35]
assign _dec_T_431 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_433; // @[Decoupled.scala:51:35]
assign _dec_T_433 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_435; // @[Decoupled.scala:51:35]
assign _dec_T_435 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_437; // @[Decoupled.scala:51:35]
assign _dec_T_437 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_439; // @[Decoupled.scala:51:35]
assign _dec_T_439 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_441; // @[Decoupled.scala:51:35]
assign _dec_T_441 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_443; // @[Decoupled.scala:51:35]
assign _dec_T_443 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_445; // @[Decoupled.scala:51:35]
assign _dec_T_445 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_447; // @[Decoupled.scala:51:35]
assign _dec_T_447 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_449; // @[Decoupled.scala:51:35]
assign _dec_T_449 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_451; // @[Decoupled.scala:51:35]
assign _dec_T_451 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_453; // @[Decoupled.scala:51:35]
assign _dec_T_453 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_455; // @[Decoupled.scala:51:35]
assign _dec_T_455 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_457; // @[Decoupled.scala:51:35]
assign _dec_T_457 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_459; // @[Decoupled.scala:51:35]
assign _dec_T_459 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_461; // @[Decoupled.scala:51:35]
assign _dec_T_461 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_463; // @[Decoupled.scala:51:35]
assign _dec_T_463 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_465; // @[Decoupled.scala:51:35]
assign _dec_T_465 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_467; // @[Decoupled.scala:51:35]
assign _dec_T_467 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_469; // @[Decoupled.scala:51:35]
assign _dec_T_469 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_471; // @[Decoupled.scala:51:35]
assign _dec_T_471 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_473; // @[Decoupled.scala:51:35]
assign _dec_T_473 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_475; // @[Decoupled.scala:51:35]
assign _dec_T_475 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_477; // @[Decoupled.scala:51:35]
assign _dec_T_477 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_479; // @[Decoupled.scala:51:35]
assign _dec_T_479 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_481; // @[Decoupled.scala:51:35]
assign _dec_T_481 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_483; // @[Decoupled.scala:51:35]
assign _dec_T_483 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_485; // @[Decoupled.scala:51:35]
assign _dec_T_485 = _GEN_3; // @[Decoupled.scala:51:35]
wire _dec_T_487; // @[Decoupled.scala:51:35]
assign _dec_T_487 = _GEN_3; // @[Decoupled.scala:51:35]
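  // Per-ID transaction bookkeeping for AXI ID 0 (ToAXI4.scala:272-286): a one-bit count
  // register tracks whether a request with this ID is in flight, inc/dec pulse on the
  // selected request/response handshakes, and the resulting idStall_0 is used to hold
  // off a new request on this ID while one is still outstanding.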
wire dec = _dec_T & _dec_T_1; // @[Decoupled.scala:51:35]
wire [1:0] _count_T = {1'h0, count} + {1'h0, inc}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_1 = _count_T[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_2 = {1'h0, _count_T_1} - {1'h0, dec}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_3 = _count_T_2[0]; // @[ToAXI4.scala:278:37]
wire _idStall_0_T = ~idle; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_0_T_3 = _idStall_0_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_0 = _idStall_0_T_3; // @[ToAXI4.scala:115:32, :286:34]
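  // The same one-entry counter structure is replicated below for each remaining AXI ID
  // (count_1/idStall_1, count_2/idStall_2, ...), differing only in the a_sel_*/d_sel_*
  // select signals and the _inc_T_*/_dec_T_* fan-out copies they consume.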
reg count_1; // @[ToAXI4.scala:272:28]
wire _idStall_1_T_2 = count_1; // @[ToAXI4.scala:272:28, :286:44]
reg write_1; // @[ToAXI4.scala:273:24]
wire idle_1 = ~count_1; // @[ToAXI4.scala:272:28, :274:26]
wire inc_1 = a_sel_1 & _inc_T_1; // @[Decoupled.scala:51:35]
wire _dec_T_2 = d_sel_1 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_1 = _dec_T_2 & _dec_T_3; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_4 = {1'h0, count_1} + {1'h0, inc_1}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_5 = _count_T_4[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_6 = {1'h0, _count_T_5} - {1'h0, dec_1}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_7 = _count_T_6[0]; // @[ToAXI4.scala:278:37]
wire _idStall_1_T = ~idle_1; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_1_T_3 = _idStall_1_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_1 = _idStall_1_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_2; // @[ToAXI4.scala:272:28]
wire _idStall_2_T_2 = count_2; // @[ToAXI4.scala:272:28, :286:44]
reg write_2; // @[ToAXI4.scala:273:24]
wire idle_2 = ~count_2; // @[ToAXI4.scala:272:28, :274:26]
wire inc_2 = a_sel_2 & _inc_T_2; // @[Decoupled.scala:51:35]
wire _dec_T_4 = d_sel_2 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_2 = _dec_T_4 & _dec_T_5; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_8 = {1'h0, count_2} + {1'h0, inc_2}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_9 = _count_T_8[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_10 = {1'h0, _count_T_9} - {1'h0, dec_2}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_11 = _count_T_10[0]; // @[ToAXI4.scala:278:37]
wire _idStall_2_T = ~idle_2; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_2_T_3 = _idStall_2_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_2 = _idStall_2_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_3; // @[ToAXI4.scala:272:28]
wire _idStall_3_T_2 = count_3; // @[ToAXI4.scala:272:28, :286:44]
reg write_3; // @[ToAXI4.scala:273:24]
wire idle_3 = ~count_3; // @[ToAXI4.scala:272:28, :274:26]
wire inc_3 = a_sel_3 & _inc_T_3; // @[Decoupled.scala:51:35]
wire _dec_T_6 = d_sel_3 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_3 = _dec_T_6 & _dec_T_7; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_12 = {1'h0, count_3} + {1'h0, inc_3}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_13 = _count_T_12[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_14 = {1'h0, _count_T_13} - {1'h0, dec_3}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_15 = _count_T_14[0]; // @[ToAXI4.scala:278:37]
wire _idStall_3_T = ~idle_3; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_3_T_3 = _idStall_3_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_3 = _idStall_3_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_4; // @[ToAXI4.scala:272:28]
wire _idStall_4_T_2 = count_4; // @[ToAXI4.scala:272:28, :286:44]
reg write_4; // @[ToAXI4.scala:273:24]
wire idle_4 = ~count_4; // @[ToAXI4.scala:272:28, :274:26]
wire inc_4 = a_sel_4 & _inc_T_4; // @[Decoupled.scala:51:35]
wire _dec_T_8 = d_sel_4 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_4 = _dec_T_8 & _dec_T_9; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_16 = {1'h0, count_4} + {1'h0, inc_4}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_17 = _count_T_16[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_18 = {1'h0, _count_T_17} - {1'h0, dec_4}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_19 = _count_T_18[0]; // @[ToAXI4.scala:278:37]
wire _idStall_4_T = ~idle_4; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_4_T_3 = _idStall_4_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_4 = _idStall_4_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_5; // @[ToAXI4.scala:272:28]
wire _idStall_5_T_2 = count_5; // @[ToAXI4.scala:272:28, :286:44]
reg write_5; // @[ToAXI4.scala:273:24]
wire idle_5 = ~count_5; // @[ToAXI4.scala:272:28, :274:26]
wire inc_5 = a_sel_5 & _inc_T_5; // @[Decoupled.scala:51:35]
wire _dec_T_10 = d_sel_5 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_5 = _dec_T_10 & _dec_T_11; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_20 = {1'h0, count_5} + {1'h0, inc_5}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_21 = _count_T_20[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_22 = {1'h0, _count_T_21} - {1'h0, dec_5}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_23 = _count_T_22[0]; // @[ToAXI4.scala:278:37]
wire _idStall_5_T = ~idle_5; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_5_T_3 = _idStall_5_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_5 = _idStall_5_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_6; // @[ToAXI4.scala:272:28]
wire _idStall_6_T_2 = count_6; // @[ToAXI4.scala:272:28, :286:44]
reg write_6; // @[ToAXI4.scala:273:24]
wire idle_6 = ~count_6; // @[ToAXI4.scala:272:28, :274:26]
wire inc_6 = a_sel_6 & _inc_T_6; // @[Decoupled.scala:51:35]
wire _dec_T_12 = d_sel_6 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_6 = _dec_T_12 & _dec_T_13; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_24 = {1'h0, count_6} + {1'h0, inc_6}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_25 = _count_T_24[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_26 = {1'h0, _count_T_25} - {1'h0, dec_6}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_27 = _count_T_26[0]; // @[ToAXI4.scala:278:37]
wire _idStall_6_T = ~idle_6; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_6_T_3 = _idStall_6_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_6 = _idStall_6_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_7; // @[ToAXI4.scala:272:28]
wire _idStall_7_T_2 = count_7; // @[ToAXI4.scala:272:28, :286:44]
reg write_7; // @[ToAXI4.scala:273:24]
wire idle_7 = ~count_7; // @[ToAXI4.scala:272:28, :274:26]
wire inc_7 = a_sel_7 & _inc_T_7; // @[Decoupled.scala:51:35]
wire _dec_T_14 = d_sel_7 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_7 = _dec_T_14 & _dec_T_15; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_28 = {1'h0, count_7} + {1'h0, inc_7}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_29 = _count_T_28[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_30 = {1'h0, _count_T_29} - {1'h0, dec_7}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_31 = _count_T_30[0]; // @[ToAXI4.scala:278:37]
wire _idStall_7_T = ~idle_7; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_7_T_3 = _idStall_7_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_7 = _idStall_7_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_8; // @[ToAXI4.scala:272:28]
wire _idStall_8_T_2 = count_8; // @[ToAXI4.scala:272:28, :286:44]
reg write_8; // @[ToAXI4.scala:273:24]
wire idle_8 = ~count_8; // @[ToAXI4.scala:272:28, :274:26]
wire inc_8 = a_sel_8 & _inc_T_8; // @[Decoupled.scala:51:35]
wire _dec_T_16 = d_sel_8 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_8 = _dec_T_16 & _dec_T_17; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_32 = {1'h0, count_8} + {1'h0, inc_8}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_33 = _count_T_32[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_34 = {1'h0, _count_T_33} - {1'h0, dec_8}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_35 = _count_T_34[0]; // @[ToAXI4.scala:278:37]
wire _idStall_8_T = ~idle_8; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_8_T_3 = _idStall_8_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_8 = _idStall_8_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_9; // @[ToAXI4.scala:272:28]
wire _idStall_9_T_2 = count_9; // @[ToAXI4.scala:272:28, :286:44]
reg write_9; // @[ToAXI4.scala:273:24]
wire idle_9 = ~count_9; // @[ToAXI4.scala:272:28, :274:26]
wire inc_9 = a_sel_9 & _inc_T_9; // @[Decoupled.scala:51:35]
wire _dec_T_18 = d_sel_9 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_9 = _dec_T_18 & _dec_T_19; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_36 = {1'h0, count_9} + {1'h0, inc_9}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_37 = _count_T_36[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_38 = {1'h0, _count_T_37} - {1'h0, dec_9}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_39 = _count_T_38[0]; // @[ToAXI4.scala:278:37]
wire _idStall_9_T = ~idle_9; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_9_T_3 = _idStall_9_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_9 = _idStall_9_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_10; // @[ToAXI4.scala:272:28]
wire _idStall_10_T_2 = count_10; // @[ToAXI4.scala:272:28, :286:44]
reg write_10; // @[ToAXI4.scala:273:24]
wire idle_10 = ~count_10; // @[ToAXI4.scala:272:28, :274:26]
wire inc_10 = a_sel_10 & _inc_T_10; // @[Decoupled.scala:51:35]
wire _dec_T_20 = d_sel_10 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_10 = _dec_T_20 & _dec_T_21; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_40 = {1'h0, count_10} + {1'h0, inc_10}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_41 = _count_T_40[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_42 = {1'h0, _count_T_41} - {1'h0, dec_10}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_43 = _count_T_42[0]; // @[ToAXI4.scala:278:37]
wire _idStall_10_T = ~idle_10; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_10_T_3 = _idStall_10_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_10 = _idStall_10_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_11; // @[ToAXI4.scala:272:28]
wire _idStall_11_T_2 = count_11; // @[ToAXI4.scala:272:28, :286:44]
reg write_11; // @[ToAXI4.scala:273:24]
wire idle_11 = ~count_11; // @[ToAXI4.scala:272:28, :274:26]
wire inc_11 = a_sel_11 & _inc_T_11; // @[Decoupled.scala:51:35]
wire _dec_T_22 = d_sel_11 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_11 = _dec_T_22 & _dec_T_23; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_44 = {1'h0, count_11} + {1'h0, inc_11}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_45 = _count_T_44[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_46 = {1'h0, _count_T_45} - {1'h0, dec_11}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_47 = _count_T_46[0]; // @[ToAXI4.scala:278:37]
wire _idStall_11_T = ~idle_11; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_11_T_3 = _idStall_11_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_11 = _idStall_11_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_12; // @[ToAXI4.scala:272:28]
wire _idStall_12_T_2 = count_12; // @[ToAXI4.scala:272:28, :286:44]
reg write_12; // @[ToAXI4.scala:273:24]
wire idle_12 = ~count_12; // @[ToAXI4.scala:272:28, :274:26]
wire inc_12 = a_sel_12 & _inc_T_12; // @[Decoupled.scala:51:35]
wire _dec_T_24 = d_sel_12 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_12 = _dec_T_24 & _dec_T_25; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_48 = {1'h0, count_12} + {1'h0, inc_12}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_49 = _count_T_48[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_50 = {1'h0, _count_T_49} - {1'h0, dec_12}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_51 = _count_T_50[0]; // @[ToAXI4.scala:278:37]
wire _idStall_12_T = ~idle_12; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_12_T_3 = _idStall_12_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_12 = _idStall_12_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_13; // @[ToAXI4.scala:272:28]
wire _idStall_13_T_2 = count_13; // @[ToAXI4.scala:272:28, :286:44]
reg write_13; // @[ToAXI4.scala:273:24]
wire idle_13 = ~count_13; // @[ToAXI4.scala:272:28, :274:26]
wire inc_13 = a_sel_13 & _inc_T_13; // @[Decoupled.scala:51:35]
wire _dec_T_26 = d_sel_13 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_13 = _dec_T_26 & _dec_T_27; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_52 = {1'h0, count_13} + {1'h0, inc_13}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_53 = _count_T_52[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_54 = {1'h0, _count_T_53} - {1'h0, dec_13}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_55 = _count_T_54[0]; // @[ToAXI4.scala:278:37]
wire _idStall_13_T = ~idle_13; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_13_T_3 = _idStall_13_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_13 = _idStall_13_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_14; // @[ToAXI4.scala:272:28]
wire _idStall_14_T_2 = count_14; // @[ToAXI4.scala:272:28, :286:44]
reg write_14; // @[ToAXI4.scala:273:24]
wire idle_14 = ~count_14; // @[ToAXI4.scala:272:28, :274:26]
wire inc_14 = a_sel_14 & _inc_T_14; // @[Decoupled.scala:51:35]
wire _dec_T_28 = d_sel_14 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_14 = _dec_T_28 & _dec_T_29; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_56 = {1'h0, count_14} + {1'h0, inc_14}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_57 = _count_T_56[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_58 = {1'h0, _count_T_57} - {1'h0, dec_14}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_59 = _count_T_58[0]; // @[ToAXI4.scala:278:37]
wire _idStall_14_T = ~idle_14; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_14_T_3 = _idStall_14_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_14 = _idStall_14_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_15; // @[ToAXI4.scala:272:28]
wire _idStall_15_T_2 = count_15; // @[ToAXI4.scala:272:28, :286:44]
reg write_15; // @[ToAXI4.scala:273:24]
wire idle_15 = ~count_15; // @[ToAXI4.scala:272:28, :274:26]
wire inc_15 = a_sel_15 & _inc_T_15; // @[Decoupled.scala:51:35]
wire _dec_T_30 = d_sel_15 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_15 = _dec_T_30 & _dec_T_31; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_60 = {1'h0, count_15} + {1'h0, inc_15}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_61 = _count_T_60[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_62 = {1'h0, _count_T_61} - {1'h0, dec_15}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_63 = _count_T_62[0]; // @[ToAXI4.scala:278:37]
wire _idStall_15_T = ~idle_15; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_15_T_3 = _idStall_15_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_15 = _idStall_15_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_16; // @[ToAXI4.scala:272:28]
wire _idStall_16_T_2 = count_16; // @[ToAXI4.scala:272:28, :286:44]
reg write_16; // @[ToAXI4.scala:273:24]
wire idle_16 = ~count_16; // @[ToAXI4.scala:272:28, :274:26]
wire inc_16 = a_sel_16 & _inc_T_16; // @[Decoupled.scala:51:35]
wire _dec_T_32 = d_sel_16 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_16 = _dec_T_32 & _dec_T_33; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_64 = {1'h0, count_16} + {1'h0, inc_16}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_65 = _count_T_64[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_66 = {1'h0, _count_T_65} - {1'h0, dec_16}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_67 = _count_T_66[0]; // @[ToAXI4.scala:278:37]
wire _idStall_16_T = ~idle_16; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_16_T_3 = _idStall_16_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_16 = _idStall_16_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_17; // @[ToAXI4.scala:272:28]
wire _idStall_17_T_2 = count_17; // @[ToAXI4.scala:272:28, :286:44]
reg write_17; // @[ToAXI4.scala:273:24]
wire idle_17 = ~count_17; // @[ToAXI4.scala:272:28, :274:26]
wire inc_17 = a_sel_17 & _inc_T_17; // @[Decoupled.scala:51:35]
wire _dec_T_34 = d_sel_17 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_17 = _dec_T_34 & _dec_T_35; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_68 = {1'h0, count_17} + {1'h0, inc_17}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_69 = _count_T_68[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_70 = {1'h0, _count_T_69} - {1'h0, dec_17}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_71 = _count_T_70[0]; // @[ToAXI4.scala:278:37]
wire _idStall_17_T = ~idle_17; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_17_T_3 = _idStall_17_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_17 = _idStall_17_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_18; // @[ToAXI4.scala:272:28]
wire _idStall_18_T_2 = count_18; // @[ToAXI4.scala:272:28, :286:44]
reg write_18; // @[ToAXI4.scala:273:24]
wire idle_18 = ~count_18; // @[ToAXI4.scala:272:28, :274:26]
wire inc_18 = a_sel_18 & _inc_T_18; // @[Decoupled.scala:51:35]
wire _dec_T_36 = d_sel_18 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_18 = _dec_T_36 & _dec_T_37; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_72 = {1'h0, count_18} + {1'h0, inc_18}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_73 = _count_T_72[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_74 = {1'h0, _count_T_73} - {1'h0, dec_18}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_75 = _count_T_74[0]; // @[ToAXI4.scala:278:37]
wire _idStall_18_T = ~idle_18; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_18_T_3 = _idStall_18_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_18 = _idStall_18_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_19; // @[ToAXI4.scala:272:28]
wire _idStall_19_T_2 = count_19; // @[ToAXI4.scala:272:28, :286:44]
reg write_19; // @[ToAXI4.scala:273:24]
wire idle_19 = ~count_19; // @[ToAXI4.scala:272:28, :274:26]
wire inc_19 = a_sel_19 & _inc_T_19; // @[Decoupled.scala:51:35]
wire _dec_T_38 = d_sel_19 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_19 = _dec_T_38 & _dec_T_39; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_76 = {1'h0, count_19} + {1'h0, inc_19}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_77 = _count_T_76[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_78 = {1'h0, _count_T_77} - {1'h0, dec_19}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_79 = _count_T_78[0]; // @[ToAXI4.scala:278:37]
wire _idStall_19_T = ~idle_19; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_19_T_3 = _idStall_19_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_19 = _idStall_19_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_20; // @[ToAXI4.scala:272:28]
wire _idStall_20_T_2 = count_20; // @[ToAXI4.scala:272:28, :286:44]
reg write_20; // @[ToAXI4.scala:273:24]
wire idle_20 = ~count_20; // @[ToAXI4.scala:272:28, :274:26]
wire inc_20 = a_sel_20 & _inc_T_20; // @[Decoupled.scala:51:35]
wire _dec_T_40 = d_sel_20 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_20 = _dec_T_40 & _dec_T_41; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_80 = {1'h0, count_20} + {1'h0, inc_20}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_81 = _count_T_80[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_82 = {1'h0, _count_T_81} - {1'h0, dec_20}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_83 = _count_T_82[0]; // @[ToAXI4.scala:278:37]
wire _idStall_20_T = ~idle_20; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_20_T_3 = _idStall_20_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_20 = _idStall_20_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_21; // @[ToAXI4.scala:272:28]
wire _idStall_21_T_2 = count_21; // @[ToAXI4.scala:272:28, :286:44]
reg write_21; // @[ToAXI4.scala:273:24]
wire idle_21 = ~count_21; // @[ToAXI4.scala:272:28, :274:26]
wire inc_21 = a_sel_21 & _inc_T_21; // @[Decoupled.scala:51:35]
wire _dec_T_42 = d_sel_21 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_21 = _dec_T_42 & _dec_T_43; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_84 = {1'h0, count_21} + {1'h0, inc_21}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_85 = _count_T_84[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_86 = {1'h0, _count_T_85} - {1'h0, dec_21}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_87 = _count_T_86[0]; // @[ToAXI4.scala:278:37]
wire _idStall_21_T = ~idle_21; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_21_T_3 = _idStall_21_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_21 = _idStall_21_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_22; // @[ToAXI4.scala:272:28]
wire _idStall_22_T_2 = count_22; // @[ToAXI4.scala:272:28, :286:44]
reg write_22; // @[ToAXI4.scala:273:24]
wire idle_22 = ~count_22; // @[ToAXI4.scala:272:28, :274:26]
wire inc_22 = a_sel_22 & _inc_T_22; // @[Decoupled.scala:51:35]
wire _dec_T_44 = d_sel_22 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_22 = _dec_T_44 & _dec_T_45; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_88 = {1'h0, count_22} + {1'h0, inc_22}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_89 = _count_T_88[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_90 = {1'h0, _count_T_89} - {1'h0, dec_22}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_91 = _count_T_90[0]; // @[ToAXI4.scala:278:37]
wire _idStall_22_T = ~idle_22; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_22_T_3 = _idStall_22_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_22 = _idStall_22_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_23; // @[ToAXI4.scala:272:28]
wire _idStall_23_T_2 = count_23; // @[ToAXI4.scala:272:28, :286:44]
reg write_23; // @[ToAXI4.scala:273:24]
wire idle_23 = ~count_23; // @[ToAXI4.scala:272:28, :274:26]
wire inc_23 = a_sel_23 & _inc_T_23; // @[Decoupled.scala:51:35]
wire _dec_T_46 = d_sel_23 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_23 = _dec_T_46 & _dec_T_47; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_92 = {1'h0, count_23} + {1'h0, inc_23}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_93 = _count_T_92[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_94 = {1'h0, _count_T_93} - {1'h0, dec_23}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_95 = _count_T_94[0]; // @[ToAXI4.scala:278:37]
wire _idStall_23_T = ~idle_23; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_23_T_3 = _idStall_23_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_23 = _idStall_23_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_24; // @[ToAXI4.scala:272:28]
wire _idStall_24_T_2 = count_24; // @[ToAXI4.scala:272:28, :286:44]
reg write_24; // @[ToAXI4.scala:273:24]
wire idle_24 = ~count_24; // @[ToAXI4.scala:272:28, :274:26]
wire inc_24 = a_sel_24 & _inc_T_24; // @[Decoupled.scala:51:35]
wire _dec_T_48 = d_sel_24 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_24 = _dec_T_48 & _dec_T_49; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_96 = {1'h0, count_24} + {1'h0, inc_24}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_97 = _count_T_96[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_98 = {1'h0, _count_T_97} - {1'h0, dec_24}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_99 = _count_T_98[0]; // @[ToAXI4.scala:278:37]
wire _idStall_24_T = ~idle_24; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_24_T_3 = _idStall_24_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_24 = _idStall_24_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_25; // @[ToAXI4.scala:272:28]
wire _idStall_25_T_2 = count_25; // @[ToAXI4.scala:272:28, :286:44]
reg write_25; // @[ToAXI4.scala:273:24]
wire idle_25 = ~count_25; // @[ToAXI4.scala:272:28, :274:26]
wire inc_25 = a_sel_25 & _inc_T_25; // @[Decoupled.scala:51:35]
wire _dec_T_50 = d_sel_25 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_25 = _dec_T_50 & _dec_T_51; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_100 = {1'h0, count_25} + {1'h0, inc_25}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_101 = _count_T_100[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_102 = {1'h0, _count_T_101} - {1'h0, dec_25}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_103 = _count_T_102[0]; // @[ToAXI4.scala:278:37]
wire _idStall_25_T = ~idle_25; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_25_T_3 = _idStall_25_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_25 = _idStall_25_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_26; // @[ToAXI4.scala:272:28]
wire _idStall_26_T_2 = count_26; // @[ToAXI4.scala:272:28, :286:44]
reg write_26; // @[ToAXI4.scala:273:24]
wire idle_26 = ~count_26; // @[ToAXI4.scala:272:28, :274:26]
wire inc_26 = a_sel_26 & _inc_T_26; // @[Decoupled.scala:51:35]
wire _dec_T_52 = d_sel_26 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_26 = _dec_T_52 & _dec_T_53; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_104 = {1'h0, count_26} + {1'h0, inc_26}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_105 = _count_T_104[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_106 = {1'h0, _count_T_105} - {1'h0, dec_26}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_107 = _count_T_106[0]; // @[ToAXI4.scala:278:37]
wire _idStall_26_T = ~idle_26; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_26_T_3 = _idStall_26_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_26 = _idStall_26_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_27; // @[ToAXI4.scala:272:28]
wire _idStall_27_T_2 = count_27; // @[ToAXI4.scala:272:28, :286:44]
reg write_27; // @[ToAXI4.scala:273:24]
wire idle_27 = ~count_27; // @[ToAXI4.scala:272:28, :274:26]
wire inc_27 = a_sel_27 & _inc_T_27; // @[Decoupled.scala:51:35]
wire _dec_T_54 = d_sel_27 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_27 = _dec_T_54 & _dec_T_55; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_108 = {1'h0, count_27} + {1'h0, inc_27}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_109 = _count_T_108[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_110 = {1'h0, _count_T_109} - {1'h0, dec_27}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_111 = _count_T_110[0]; // @[ToAXI4.scala:278:37]
wire _idStall_27_T = ~idle_27; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_27_T_3 = _idStall_27_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_27 = _idStall_27_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_28; // @[ToAXI4.scala:272:28]
wire _idStall_28_T_2 = count_28; // @[ToAXI4.scala:272:28, :286:44]
reg write_28; // @[ToAXI4.scala:273:24]
wire idle_28 = ~count_28; // @[ToAXI4.scala:272:28, :274:26]
wire inc_28 = a_sel_28 & _inc_T_28; // @[Decoupled.scala:51:35]
wire _dec_T_56 = d_sel_28 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_28 = _dec_T_56 & _dec_T_57; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_112 = {1'h0, count_28} + {1'h0, inc_28}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_113 = _count_T_112[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_114 = {1'h0, _count_T_113} - {1'h0, dec_28}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_115 = _count_T_114[0]; // @[ToAXI4.scala:278:37]
wire _idStall_28_T = ~idle_28; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_28_T_3 = _idStall_28_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_28 = _idStall_28_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_29; // @[ToAXI4.scala:272:28]
wire _idStall_29_T_2 = count_29; // @[ToAXI4.scala:272:28, :286:44]
reg write_29; // @[ToAXI4.scala:273:24]
wire idle_29 = ~count_29; // @[ToAXI4.scala:272:28, :274:26]
wire inc_29 = a_sel_29 & _inc_T_29; // @[Decoupled.scala:51:35]
wire _dec_T_58 = d_sel_29 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_29 = _dec_T_58 & _dec_T_59; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_116 = {1'h0, count_29} + {1'h0, inc_29}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_117 = _count_T_116[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_118 = {1'h0, _count_T_117} - {1'h0, dec_29}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_119 = _count_T_118[0]; // @[ToAXI4.scala:278:37]
wire _idStall_29_T = ~idle_29; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_29_T_3 = _idStall_29_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_29 = _idStall_29_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_30; // @[ToAXI4.scala:272:28]
wire _idStall_30_T_2 = count_30; // @[ToAXI4.scala:272:28, :286:44]
reg write_30; // @[ToAXI4.scala:273:24]
wire idle_30 = ~count_30; // @[ToAXI4.scala:272:28, :274:26]
wire inc_30 = a_sel_30 & _inc_T_30; // @[Decoupled.scala:51:35]
wire _dec_T_60 = d_sel_30 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_30 = _dec_T_60 & _dec_T_61; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_120 = {1'h0, count_30} + {1'h0, inc_30}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_121 = _count_T_120[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_122 = {1'h0, _count_T_121} - {1'h0, dec_30}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_123 = _count_T_122[0]; // @[ToAXI4.scala:278:37]
wire _idStall_30_T = ~idle_30; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_30_T_3 = _idStall_30_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_30 = _idStall_30_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_31; // @[ToAXI4.scala:272:28]
wire _idStall_31_T_2 = count_31; // @[ToAXI4.scala:272:28, :286:44]
reg write_31; // @[ToAXI4.scala:273:24]
wire idle_31 = ~count_31; // @[ToAXI4.scala:272:28, :274:26]
wire inc_31 = a_sel_31 & _inc_T_31; // @[Decoupled.scala:51:35]
wire _dec_T_62 = d_sel_31 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_31 = _dec_T_62 & _dec_T_63; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_124 = {1'h0, count_31} + {1'h0, inc_31}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_125 = _count_T_124[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_126 = {1'h0, _count_T_125} - {1'h0, dec_31}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_127 = _count_T_126[0]; // @[ToAXI4.scala:278:37]
wire _idStall_31_T = ~idle_31; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_31_T_3 = _idStall_31_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_31 = _idStall_31_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_32; // @[ToAXI4.scala:272:28]
wire _idStall_32_T_2 = count_32; // @[ToAXI4.scala:272:28, :286:44]
reg write_32; // @[ToAXI4.scala:273:24]
wire idle_32 = ~count_32; // @[ToAXI4.scala:272:28, :274:26]
wire inc_32 = a_sel_32 & _inc_T_32; // @[Decoupled.scala:51:35]
wire _dec_T_64 = d_sel_32 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_32 = _dec_T_64 & _dec_T_65; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_128 = {1'h0, count_32} + {1'h0, inc_32}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_129 = _count_T_128[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_130 = {1'h0, _count_T_129} - {1'h0, dec_32}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_131 = _count_T_130[0]; // @[ToAXI4.scala:278:37]
wire _idStall_32_T = ~idle_32; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_32_T_3 = _idStall_32_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_32 = _idStall_32_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_33; // @[ToAXI4.scala:272:28]
wire _idStall_33_T_2 = count_33; // @[ToAXI4.scala:272:28, :286:44]
reg write_33; // @[ToAXI4.scala:273:24]
wire idle_33 = ~count_33; // @[ToAXI4.scala:272:28, :274:26]
wire inc_33 = a_sel_33 & _inc_T_33; // @[Decoupled.scala:51:35]
wire _dec_T_66 = d_sel_33 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_33 = _dec_T_66 & _dec_T_67; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_132 = {1'h0, count_33} + {1'h0, inc_33}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_133 = _count_T_132[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_134 = {1'h0, _count_T_133} - {1'h0, dec_33}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_135 = _count_T_134[0]; // @[ToAXI4.scala:278:37]
wire _idStall_33_T = ~idle_33; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_33_T_3 = _idStall_33_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_33 = _idStall_33_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_34; // @[ToAXI4.scala:272:28]
wire _idStall_34_T_2 = count_34; // @[ToAXI4.scala:272:28, :286:44]
reg write_34; // @[ToAXI4.scala:273:24]
wire idle_34 = ~count_34; // @[ToAXI4.scala:272:28, :274:26]
wire inc_34 = a_sel_34 & _inc_T_34; // @[Decoupled.scala:51:35]
wire _dec_T_68 = d_sel_34 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_34 = _dec_T_68 & _dec_T_69; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_136 = {1'h0, count_34} + {1'h0, inc_34}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_137 = _count_T_136[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_138 = {1'h0, _count_T_137} - {1'h0, dec_34}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_139 = _count_T_138[0]; // @[ToAXI4.scala:278:37]
wire _idStall_34_T = ~idle_34; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_34_T_3 = _idStall_34_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_34 = _idStall_34_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_35; // @[ToAXI4.scala:272:28]
wire _idStall_35_T_2 = count_35; // @[ToAXI4.scala:272:28, :286:44]
reg write_35; // @[ToAXI4.scala:273:24]
wire idle_35 = ~count_35; // @[ToAXI4.scala:272:28, :274:26]
wire inc_35 = a_sel_35 & _inc_T_35; // @[Decoupled.scala:51:35]
wire _dec_T_70 = d_sel_35 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_35 = _dec_T_70 & _dec_T_71; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_140 = {1'h0, count_35} + {1'h0, inc_35}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_141 = _count_T_140[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_142 = {1'h0, _count_T_141} - {1'h0, dec_35}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_143 = _count_T_142[0]; // @[ToAXI4.scala:278:37]
wire _idStall_35_T = ~idle_35; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_35_T_3 = _idStall_35_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_35 = _idStall_35_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_36; // @[ToAXI4.scala:272:28]
wire _idStall_36_T_2 = count_36; // @[ToAXI4.scala:272:28, :286:44]
reg write_36; // @[ToAXI4.scala:273:24]
wire idle_36 = ~count_36; // @[ToAXI4.scala:272:28, :274:26]
wire inc_36 = a_sel_36 & _inc_T_36; // @[Decoupled.scala:51:35]
wire _dec_T_72 = d_sel_36 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_36 = _dec_T_72 & _dec_T_73; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_144 = {1'h0, count_36} + {1'h0, inc_36}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_145 = _count_T_144[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_146 = {1'h0, _count_T_145} - {1'h0, dec_36}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_147 = _count_T_146[0]; // @[ToAXI4.scala:278:37]
wire _idStall_36_T = ~idle_36; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_36_T_3 = _idStall_36_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_36 = _idStall_36_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_37; // @[ToAXI4.scala:272:28]
wire _idStall_37_T_2 = count_37; // @[ToAXI4.scala:272:28, :286:44]
reg write_37; // @[ToAXI4.scala:273:24]
wire idle_37 = ~count_37; // @[ToAXI4.scala:272:28, :274:26]
wire inc_37 = a_sel_37 & _inc_T_37; // @[Decoupled.scala:51:35]
wire _dec_T_74 = d_sel_37 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_37 = _dec_T_74 & _dec_T_75; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_148 = {1'h0, count_37} + {1'h0, inc_37}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_149 = _count_T_148[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_150 = {1'h0, _count_T_149} - {1'h0, dec_37}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_151 = _count_T_150[0]; // @[ToAXI4.scala:278:37]
wire _idStall_37_T = ~idle_37; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_37_T_3 = _idStall_37_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_37 = _idStall_37_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_38; // @[ToAXI4.scala:272:28]
wire _idStall_38_T_2 = count_38; // @[ToAXI4.scala:272:28, :286:44]
reg write_38; // @[ToAXI4.scala:273:24]
wire idle_38 = ~count_38; // @[ToAXI4.scala:272:28, :274:26]
wire inc_38 = a_sel_38 & _inc_T_38; // @[Decoupled.scala:51:35]
wire _dec_T_76 = d_sel_38 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_38 = _dec_T_76 & _dec_T_77; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_152 = {1'h0, count_38} + {1'h0, inc_38}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_153 = _count_T_152[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_154 = {1'h0, _count_T_153} - {1'h0, dec_38}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_155 = _count_T_154[0]; // @[ToAXI4.scala:278:37]
wire _idStall_38_T = ~idle_38; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_38_T_3 = _idStall_38_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_38 = _idStall_38_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_39; // @[ToAXI4.scala:272:28]
wire _idStall_39_T_2 = count_39; // @[ToAXI4.scala:272:28, :286:44]
reg write_39; // @[ToAXI4.scala:273:24]
wire idle_39 = ~count_39; // @[ToAXI4.scala:272:28, :274:26]
wire inc_39 = a_sel_39 & _inc_T_39; // @[Decoupled.scala:51:35]
wire _dec_T_78 = d_sel_39 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_39 = _dec_T_78 & _dec_T_79; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_156 = {1'h0, count_39} + {1'h0, inc_39}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_157 = _count_T_156[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_158 = {1'h0, _count_T_157} - {1'h0, dec_39}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_159 = _count_T_158[0]; // @[ToAXI4.scala:278:37]
wire _idStall_39_T = ~idle_39; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_39_T_3 = _idStall_39_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_39 = _idStall_39_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_40; // @[ToAXI4.scala:272:28]
wire _idStall_40_T_2 = count_40; // @[ToAXI4.scala:272:28, :286:44]
reg write_40; // @[ToAXI4.scala:273:24]
wire idle_40 = ~count_40; // @[ToAXI4.scala:272:28, :274:26]
wire inc_40 = a_sel_40 & _inc_T_40; // @[Decoupled.scala:51:35]
wire _dec_T_80 = d_sel_40 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_40 = _dec_T_80 & _dec_T_81; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_160 = {1'h0, count_40} + {1'h0, inc_40}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_161 = _count_T_160[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_162 = {1'h0, _count_T_161} - {1'h0, dec_40}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_163 = _count_T_162[0]; // @[ToAXI4.scala:278:37]
wire _idStall_40_T = ~idle_40; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_40_T_3 = _idStall_40_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_40 = _idStall_40_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_41; // @[ToAXI4.scala:272:28]
wire _idStall_41_T_2 = count_41; // @[ToAXI4.scala:272:28, :286:44]
reg write_41; // @[ToAXI4.scala:273:24]
wire idle_41 = ~count_41; // @[ToAXI4.scala:272:28, :274:26]
wire inc_41 = a_sel_41 & _inc_T_41; // @[Decoupled.scala:51:35]
wire _dec_T_82 = d_sel_41 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_41 = _dec_T_82 & _dec_T_83; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_164 = {1'h0, count_41} + {1'h0, inc_41}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_165 = _count_T_164[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_166 = {1'h0, _count_T_165} - {1'h0, dec_41}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_167 = _count_T_166[0]; // @[ToAXI4.scala:278:37]
wire _idStall_41_T = ~idle_41; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_41_T_3 = _idStall_41_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_41 = _idStall_41_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_42; // @[ToAXI4.scala:272:28]
wire _idStall_42_T_2 = count_42; // @[ToAXI4.scala:272:28, :286:44]
reg write_42; // @[ToAXI4.scala:273:24]
wire idle_42 = ~count_42; // @[ToAXI4.scala:272:28, :274:26]
wire inc_42 = a_sel_42 & _inc_T_42; // @[Decoupled.scala:51:35]
wire _dec_T_84 = d_sel_42 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_42 = _dec_T_84 & _dec_T_85; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_168 = {1'h0, count_42} + {1'h0, inc_42}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_169 = _count_T_168[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_170 = {1'h0, _count_T_169} - {1'h0, dec_42}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_171 = _count_T_170[0]; // @[ToAXI4.scala:278:37]
wire _idStall_42_T = ~idle_42; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_42_T_3 = _idStall_42_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_42 = _idStall_42_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_43; // @[ToAXI4.scala:272:28]
wire _idStall_43_T_2 = count_43; // @[ToAXI4.scala:272:28, :286:44]
reg write_43; // @[ToAXI4.scala:273:24]
wire idle_43 = ~count_43; // @[ToAXI4.scala:272:28, :274:26]
wire inc_43 = a_sel_43 & _inc_T_43; // @[Decoupled.scala:51:35]
wire _dec_T_86 = d_sel_43 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_43 = _dec_T_86 & _dec_T_87; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_172 = {1'h0, count_43} + {1'h0, inc_43}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_173 = _count_T_172[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_174 = {1'h0, _count_T_173} - {1'h0, dec_43}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_175 = _count_T_174[0]; // @[ToAXI4.scala:278:37]
wire _idStall_43_T = ~idle_43; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_43_T_3 = _idStall_43_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_43 = _idStall_43_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_44; // @[ToAXI4.scala:272:28]
wire _idStall_44_T_2 = count_44; // @[ToAXI4.scala:272:28, :286:44]
reg write_44; // @[ToAXI4.scala:273:24]
wire idle_44 = ~count_44; // @[ToAXI4.scala:272:28, :274:26]
wire inc_44 = a_sel_44 & _inc_T_44; // @[Decoupled.scala:51:35]
wire _dec_T_88 = d_sel_44 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_44 = _dec_T_88 & _dec_T_89; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_176 = {1'h0, count_44} + {1'h0, inc_44}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_177 = _count_T_176[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_178 = {1'h0, _count_T_177} - {1'h0, dec_44}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_179 = _count_T_178[0]; // @[ToAXI4.scala:278:37]
wire _idStall_44_T = ~idle_44; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_44_T_3 = _idStall_44_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_44 = _idStall_44_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_45; // @[ToAXI4.scala:272:28]
wire _idStall_45_T_2 = count_45; // @[ToAXI4.scala:272:28, :286:44]
reg write_45; // @[ToAXI4.scala:273:24]
wire idle_45 = ~count_45; // @[ToAXI4.scala:272:28, :274:26]
wire inc_45 = a_sel_45 & _inc_T_45; // @[Decoupled.scala:51:35]
wire _dec_T_90 = d_sel_45 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_45 = _dec_T_90 & _dec_T_91; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_180 = {1'h0, count_45} + {1'h0, inc_45}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_181 = _count_T_180[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_182 = {1'h0, _count_T_181} - {1'h0, dec_45}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_183 = _count_T_182[0]; // @[ToAXI4.scala:278:37]
wire _idStall_45_T = ~idle_45; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_45_T_3 = _idStall_45_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_45 = _idStall_45_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_46; // @[ToAXI4.scala:272:28]
wire _idStall_46_T_2 = count_46; // @[ToAXI4.scala:272:28, :286:44]
reg write_46; // @[ToAXI4.scala:273:24]
wire idle_46 = ~count_46; // @[ToAXI4.scala:272:28, :274:26]
wire inc_46 = a_sel_46 & _inc_T_46; // @[Decoupled.scala:51:35]
wire _dec_T_92 = d_sel_46 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_46 = _dec_T_92 & _dec_T_93; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_184 = {1'h0, count_46} + {1'h0, inc_46}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_185 = _count_T_184[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_186 = {1'h0, _count_T_185} - {1'h0, dec_46}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_187 = _count_T_186[0]; // @[ToAXI4.scala:278:37]
wire _idStall_46_T = ~idle_46; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_46_T_3 = _idStall_46_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_46 = _idStall_46_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_47; // @[ToAXI4.scala:272:28]
wire _idStall_47_T_2 = count_47; // @[ToAXI4.scala:272:28, :286:44]
reg write_47; // @[ToAXI4.scala:273:24]
wire idle_47 = ~count_47; // @[ToAXI4.scala:272:28, :274:26]
wire inc_47 = a_sel_47 & _inc_T_47; // @[Decoupled.scala:51:35]
wire _dec_T_94 = d_sel_47 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_47 = _dec_T_94 & _dec_T_95; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_188 = {1'h0, count_47} + {1'h0, inc_47}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_189 = _count_T_188[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_190 = {1'h0, _count_T_189} - {1'h0, dec_47}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_191 = _count_T_190[0]; // @[ToAXI4.scala:278:37]
wire _idStall_47_T = ~idle_47; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_47_T_3 = _idStall_47_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_47 = _idStall_47_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_48; // @[ToAXI4.scala:272:28]
wire _idStall_48_T_2 = count_48; // @[ToAXI4.scala:272:28, :286:44]
reg write_48; // @[ToAXI4.scala:273:24]
wire idle_48 = ~count_48; // @[ToAXI4.scala:272:28, :274:26]
wire inc_48 = a_sel_48 & _inc_T_48; // @[Decoupled.scala:51:35]
wire _dec_T_96 = d_sel_48 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_48 = _dec_T_96 & _dec_T_97; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_192 = {1'h0, count_48} + {1'h0, inc_48}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_193 = _count_T_192[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_194 = {1'h0, _count_T_193} - {1'h0, dec_48}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_195 = _count_T_194[0]; // @[ToAXI4.scala:278:37]
wire _idStall_48_T = ~idle_48; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_48_T_3 = _idStall_48_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_48 = _idStall_48_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_49; // @[ToAXI4.scala:272:28]
wire _idStall_49_T_2 = count_49; // @[ToAXI4.scala:272:28, :286:44]
reg write_49; // @[ToAXI4.scala:273:24]
wire idle_49 = ~count_49; // @[ToAXI4.scala:272:28, :274:26]
wire inc_49 = a_sel_49 & _inc_T_49; // @[Decoupled.scala:51:35]
wire _dec_T_98 = d_sel_49 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_49 = _dec_T_98 & _dec_T_99; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_196 = {1'h0, count_49} + {1'h0, inc_49}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_197 = _count_T_196[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_198 = {1'h0, _count_T_197} - {1'h0, dec_49}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_199 = _count_T_198[0]; // @[ToAXI4.scala:278:37]
wire _idStall_49_T = ~idle_49; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_49_T_3 = _idStall_49_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_49 = _idStall_49_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_50; // @[ToAXI4.scala:272:28]
wire _idStall_50_T_2 = count_50; // @[ToAXI4.scala:272:28, :286:44]
reg write_50; // @[ToAXI4.scala:273:24]
wire idle_50 = ~count_50; // @[ToAXI4.scala:272:28, :274:26]
wire inc_50 = a_sel_50 & _inc_T_50; // @[Decoupled.scala:51:35]
wire _dec_T_100 = d_sel_50 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_50 = _dec_T_100 & _dec_T_101; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_200 = {1'h0, count_50} + {1'h0, inc_50}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_201 = _count_T_200[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_202 = {1'h0, _count_T_201} - {1'h0, dec_50}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_203 = _count_T_202[0]; // @[ToAXI4.scala:278:37]
wire _idStall_50_T = ~idle_50; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_50_T_3 = _idStall_50_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_50 = _idStall_50_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_51; // @[ToAXI4.scala:272:28]
wire _idStall_51_T_2 = count_51; // @[ToAXI4.scala:272:28, :286:44]
reg write_51; // @[ToAXI4.scala:273:24]
wire idle_51 = ~count_51; // @[ToAXI4.scala:272:28, :274:26]
wire inc_51 = a_sel_51 & _inc_T_51; // @[Decoupled.scala:51:35]
wire _dec_T_102 = d_sel_51 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_51 = _dec_T_102 & _dec_T_103; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_204 = {1'h0, count_51} + {1'h0, inc_51}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_205 = _count_T_204[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_206 = {1'h0, _count_T_205} - {1'h0, dec_51}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_207 = _count_T_206[0]; // @[ToAXI4.scala:278:37]
wire _idStall_51_T = ~idle_51; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_51_T_3 = _idStall_51_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_51 = _idStall_51_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_52; // @[ToAXI4.scala:272:28]
wire _idStall_52_T_2 = count_52; // @[ToAXI4.scala:272:28, :286:44]
reg write_52; // @[ToAXI4.scala:273:24]
wire idle_52 = ~count_52; // @[ToAXI4.scala:272:28, :274:26]
wire inc_52 = a_sel_52 & _inc_T_52; // @[Decoupled.scala:51:35]
wire _dec_T_104 = d_sel_52 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_52 = _dec_T_104 & _dec_T_105; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_208 = {1'h0, count_52} + {1'h0, inc_52}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_209 = _count_T_208[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_210 = {1'h0, _count_T_209} - {1'h0, dec_52}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_211 = _count_T_210[0]; // @[ToAXI4.scala:278:37]
wire _idStall_52_T = ~idle_52; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_52_T_3 = _idStall_52_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_52 = _idStall_52_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_53; // @[ToAXI4.scala:272:28]
wire _idStall_53_T_2 = count_53; // @[ToAXI4.scala:272:28, :286:44]
reg write_53; // @[ToAXI4.scala:273:24]
wire idle_53 = ~count_53; // @[ToAXI4.scala:272:28, :274:26]
wire inc_53 = a_sel_53 & _inc_T_53; // @[Decoupled.scala:51:35]
wire _dec_T_106 = d_sel_53 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_53 = _dec_T_106 & _dec_T_107; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_212 = {1'h0, count_53} + {1'h0, inc_53}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_213 = _count_T_212[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_214 = {1'h0, _count_T_213} - {1'h0, dec_53}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_215 = _count_T_214[0]; // @[ToAXI4.scala:278:37]
wire _idStall_53_T = ~idle_53; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_53_T_3 = _idStall_53_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_53 = _idStall_53_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_54; // @[ToAXI4.scala:272:28]
wire _idStall_54_T_2 = count_54; // @[ToAXI4.scala:272:28, :286:44]
reg write_54; // @[ToAXI4.scala:273:24]
wire idle_54 = ~count_54; // @[ToAXI4.scala:272:28, :274:26]
wire inc_54 = a_sel_54 & _inc_T_54; // @[Decoupled.scala:51:35]
wire _dec_T_108 = d_sel_54 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_54 = _dec_T_108 & _dec_T_109; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_216 = {1'h0, count_54} + {1'h0, inc_54}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_217 = _count_T_216[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_218 = {1'h0, _count_T_217} - {1'h0, dec_54}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_219 = _count_T_218[0]; // @[ToAXI4.scala:278:37]
wire _idStall_54_T = ~idle_54; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_54_T_3 = _idStall_54_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_54 = _idStall_54_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_55; // @[ToAXI4.scala:272:28]
wire _idStall_55_T_2 = count_55; // @[ToAXI4.scala:272:28, :286:44]
reg write_55; // @[ToAXI4.scala:273:24]
wire idle_55 = ~count_55; // @[ToAXI4.scala:272:28, :274:26]
wire inc_55 = a_sel_55 & _inc_T_55; // @[Decoupled.scala:51:35]
wire _dec_T_110 = d_sel_55 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_55 = _dec_T_110 & _dec_T_111; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_220 = {1'h0, count_55} + {1'h0, inc_55}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_221 = _count_T_220[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_222 = {1'h0, _count_T_221} - {1'h0, dec_55}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_223 = _count_T_222[0]; // @[ToAXI4.scala:278:37]
wire _idStall_55_T = ~idle_55; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_55_T_3 = _idStall_55_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_55 = _idStall_55_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_56; // @[ToAXI4.scala:272:28]
wire _idStall_56_T_2 = count_56; // @[ToAXI4.scala:272:28, :286:44]
reg write_56; // @[ToAXI4.scala:273:24]
wire idle_56 = ~count_56; // @[ToAXI4.scala:272:28, :274:26]
wire inc_56 = a_sel_56 & _inc_T_56; // @[Decoupled.scala:51:35]
wire _dec_T_112 = d_sel_56 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_56 = _dec_T_112 & _dec_T_113; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_224 = {1'h0, count_56} + {1'h0, inc_56}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_225 = _count_T_224[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_226 = {1'h0, _count_T_225} - {1'h0, dec_56}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_227 = _count_T_226[0]; // @[ToAXI4.scala:278:37]
wire _idStall_56_T = ~idle_56; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_56_T_3 = _idStall_56_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_56 = _idStall_56_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_57; // @[ToAXI4.scala:272:28]
wire _idStall_57_T_2 = count_57; // @[ToAXI4.scala:272:28, :286:44]
reg write_57; // @[ToAXI4.scala:273:24]
wire idle_57 = ~count_57; // @[ToAXI4.scala:272:28, :274:26]
wire inc_57 = a_sel_57 & _inc_T_57; // @[Decoupled.scala:51:35]
wire _dec_T_114 = d_sel_57 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_57 = _dec_T_114 & _dec_T_115; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_228 = {1'h0, count_57} + {1'h0, inc_57}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_229 = _count_T_228[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_230 = {1'h0, _count_T_229} - {1'h0, dec_57}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_231 = _count_T_230[0]; // @[ToAXI4.scala:278:37]
wire _idStall_57_T = ~idle_57; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_57_T_3 = _idStall_57_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_57 = _idStall_57_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_58; // @[ToAXI4.scala:272:28]
wire _idStall_58_T_2 = count_58; // @[ToAXI4.scala:272:28, :286:44]
reg write_58; // @[ToAXI4.scala:273:24]
wire idle_58 = ~count_58; // @[ToAXI4.scala:272:28, :274:26]
wire inc_58 = a_sel_58 & _inc_T_58; // @[Decoupled.scala:51:35]
wire _dec_T_116 = d_sel_58 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_58 = _dec_T_116 & _dec_T_117; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_232 = {1'h0, count_58} + {1'h0, inc_58}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_233 = _count_T_232[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_234 = {1'h0, _count_T_233} - {1'h0, dec_58}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_235 = _count_T_234[0]; // @[ToAXI4.scala:278:37]
wire _idStall_58_T = ~idle_58; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_58_T_3 = _idStall_58_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_58 = _idStall_58_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_59; // @[ToAXI4.scala:272:28]
wire _idStall_59_T_2 = count_59; // @[ToAXI4.scala:272:28, :286:44]
reg write_59; // @[ToAXI4.scala:273:24]
wire idle_59 = ~count_59; // @[ToAXI4.scala:272:28, :274:26]
wire inc_59 = a_sel_59 & _inc_T_59; // @[Decoupled.scala:51:35]
wire _dec_T_118 = d_sel_59 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_59 = _dec_T_118 & _dec_T_119; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_236 = {1'h0, count_59} + {1'h0, inc_59}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_237 = _count_T_236[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_238 = {1'h0, _count_T_237} - {1'h0, dec_59}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_239 = _count_T_238[0]; // @[ToAXI4.scala:278:37]
wire _idStall_59_T = ~idle_59; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_59_T_3 = _idStall_59_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_59 = _idStall_59_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_60; // @[ToAXI4.scala:272:28]
wire _idStall_60_T_2 = count_60; // @[ToAXI4.scala:272:28, :286:44]
reg write_60; // @[ToAXI4.scala:273:24]
wire idle_60 = ~count_60; // @[ToAXI4.scala:272:28, :274:26]
wire inc_60 = a_sel_60 & _inc_T_60; // @[Decoupled.scala:51:35]
wire _dec_T_120 = d_sel_60 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_60 = _dec_T_120 & _dec_T_121; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_240 = {1'h0, count_60} + {1'h0, inc_60}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_241 = _count_T_240[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_242 = {1'h0, _count_T_241} - {1'h0, dec_60}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_243 = _count_T_242[0]; // @[ToAXI4.scala:278:37]
wire _idStall_60_T = ~idle_60; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_60_T_3 = _idStall_60_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_60 = _idStall_60_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_61; // @[ToAXI4.scala:272:28]
wire _idStall_61_T_2 = count_61; // @[ToAXI4.scala:272:28, :286:44]
reg write_61; // @[ToAXI4.scala:273:24]
wire idle_61 = ~count_61; // @[ToAXI4.scala:272:28, :274:26]
wire inc_61 = a_sel_61 & _inc_T_61; // @[Decoupled.scala:51:35]
wire _dec_T_122 = d_sel_61 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_61 = _dec_T_122 & _dec_T_123; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_244 = {1'h0, count_61} + {1'h0, inc_61}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_245 = _count_T_244[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_246 = {1'h0, _count_T_245} - {1'h0, dec_61}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_247 = _count_T_246[0]; // @[ToAXI4.scala:278:37]
wire _idStall_61_T = ~idle_61; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_61_T_3 = _idStall_61_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_61 = _idStall_61_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_62; // @[ToAXI4.scala:272:28]
wire _idStall_62_T_2 = count_62; // @[ToAXI4.scala:272:28, :286:44]
reg write_62; // @[ToAXI4.scala:273:24]
wire idle_62 = ~count_62; // @[ToAXI4.scala:272:28, :274:26]
wire inc_62 = a_sel_62 & _inc_T_62; // @[Decoupled.scala:51:35]
wire _dec_T_124 = d_sel_62 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_62 = _dec_T_124 & _dec_T_125; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_248 = {1'h0, count_62} + {1'h0, inc_62}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_249 = _count_T_248[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_250 = {1'h0, _count_T_249} - {1'h0, dec_62}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_251 = _count_T_250[0]; // @[ToAXI4.scala:278:37]
wire _idStall_62_T = ~idle_62; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_62_T_3 = _idStall_62_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_62 = _idStall_62_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_63; // @[ToAXI4.scala:272:28]
wire _idStall_63_T_2 = count_63; // @[ToAXI4.scala:272:28, :286:44]
reg write_63; // @[ToAXI4.scala:273:24]
wire idle_63 = ~count_63; // @[ToAXI4.scala:272:28, :274:26]
wire inc_63 = a_sel_63 & _inc_T_63; // @[Decoupled.scala:51:35]
wire _dec_T_126 = d_sel_63 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_63 = _dec_T_126 & _dec_T_127; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_252 = {1'h0, count_63} + {1'h0, inc_63}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_253 = _count_T_252[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_254 = {1'h0, _count_T_253} - {1'h0, dec_63}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_255 = _count_T_254[0]; // @[ToAXI4.scala:278:37]
wire _idStall_63_T = ~idle_63; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_63_T_3 = _idStall_63_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_63 = _idStall_63_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_64; // @[ToAXI4.scala:272:28]
wire _idStall_64_T_2 = count_64; // @[ToAXI4.scala:272:28, :286:44]
reg write_64; // @[ToAXI4.scala:273:24]
wire idle_64 = ~count_64; // @[ToAXI4.scala:272:28, :274:26]
wire inc_64 = a_sel_64 & _inc_T_64; // @[Decoupled.scala:51:35]
wire _dec_T_128 = d_sel_64 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_64 = _dec_T_128 & _dec_T_129; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_256 = {1'h0, count_64} + {1'h0, inc_64}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_257 = _count_T_256[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_258 = {1'h0, _count_T_257} - {1'h0, dec_64}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_259 = _count_T_258[0]; // @[ToAXI4.scala:278:37]
wire _idStall_64_T = ~idle_64; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_64_T_3 = _idStall_64_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_64 = _idStall_64_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_65; // @[ToAXI4.scala:272:28]
wire _idStall_65_T_2 = count_65; // @[ToAXI4.scala:272:28, :286:44]
reg write_65; // @[ToAXI4.scala:273:24]
wire idle_65 = ~count_65; // @[ToAXI4.scala:272:28, :274:26]
wire inc_65 = a_sel_65 & _inc_T_65; // @[Decoupled.scala:51:35]
wire _dec_T_130 = d_sel_65 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_65 = _dec_T_130 & _dec_T_131; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_260 = {1'h0, count_65} + {1'h0, inc_65}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_261 = _count_T_260[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_262 = {1'h0, _count_T_261} - {1'h0, dec_65}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_263 = _count_T_262[0]; // @[ToAXI4.scala:278:37]
wire _idStall_65_T = ~idle_65; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_65_T_3 = _idStall_65_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_65 = _idStall_65_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_66; // @[ToAXI4.scala:272:28]
wire _idStall_66_T_2 = count_66; // @[ToAXI4.scala:272:28, :286:44]
reg write_66; // @[ToAXI4.scala:273:24]
wire idle_66 = ~count_66; // @[ToAXI4.scala:272:28, :274:26]
wire inc_66 = a_sel_66 & _inc_T_66; // @[Decoupled.scala:51:35]
wire _dec_T_132 = d_sel_66 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_66 = _dec_T_132 & _dec_T_133; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_264 = {1'h0, count_66} + {1'h0, inc_66}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_265 = _count_T_264[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_266 = {1'h0, _count_T_265} - {1'h0, dec_66}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_267 = _count_T_266[0]; // @[ToAXI4.scala:278:37]
wire _idStall_66_T = ~idle_66; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_66_T_3 = _idStall_66_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_66 = _idStall_66_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_67; // @[ToAXI4.scala:272:28]
wire _idStall_67_T_2 = count_67; // @[ToAXI4.scala:272:28, :286:44]
reg write_67; // @[ToAXI4.scala:273:24]
wire idle_67 = ~count_67; // @[ToAXI4.scala:272:28, :274:26]
wire inc_67 = a_sel_67 & _inc_T_67; // @[Decoupled.scala:51:35]
wire _dec_T_134 = d_sel_67 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_67 = _dec_T_134 & _dec_T_135; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_268 = {1'h0, count_67} + {1'h0, inc_67}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_269 = _count_T_268[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_270 = {1'h0, _count_T_269} - {1'h0, dec_67}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_271 = _count_T_270[0]; // @[ToAXI4.scala:278:37]
wire _idStall_67_T = ~idle_67; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_67_T_3 = _idStall_67_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_67 = _idStall_67_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_68; // @[ToAXI4.scala:272:28]
wire _idStall_68_T_2 = count_68; // @[ToAXI4.scala:272:28, :286:44]
reg write_68; // @[ToAXI4.scala:273:24]
wire idle_68 = ~count_68; // @[ToAXI4.scala:272:28, :274:26]
wire inc_68 = a_sel_68 & _inc_T_68; // @[Decoupled.scala:51:35]
wire _dec_T_136 = d_sel_68 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_68 = _dec_T_136 & _dec_T_137; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_272 = {1'h0, count_68} + {1'h0, inc_68}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_273 = _count_T_272[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_274 = {1'h0, _count_T_273} - {1'h0, dec_68}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_275 = _count_T_274[0]; // @[ToAXI4.scala:278:37]
wire _idStall_68_T = ~idle_68; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_68_T_3 = _idStall_68_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_68 = _idStall_68_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_69; // @[ToAXI4.scala:272:28]
wire _idStall_69_T_2 = count_69; // @[ToAXI4.scala:272:28, :286:44]
reg write_69; // @[ToAXI4.scala:273:24]
wire idle_69 = ~count_69; // @[ToAXI4.scala:272:28, :274:26]
wire inc_69 = a_sel_69 & _inc_T_69; // @[Decoupled.scala:51:35]
wire _dec_T_138 = d_sel_69 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_69 = _dec_T_138 & _dec_T_139; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_276 = {1'h0, count_69} + {1'h0, inc_69}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_277 = _count_T_276[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_278 = {1'h0, _count_T_277} - {1'h0, dec_69}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_279 = _count_T_278[0]; // @[ToAXI4.scala:278:37]
wire _idStall_69_T = ~idle_69; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_69_T_3 = _idStall_69_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_69 = _idStall_69_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_70; // @[ToAXI4.scala:272:28]
wire _idStall_70_T_2 = count_70; // @[ToAXI4.scala:272:28, :286:44]
reg write_70; // @[ToAXI4.scala:273:24]
wire idle_70 = ~count_70; // @[ToAXI4.scala:272:28, :274:26]
wire inc_70 = a_sel_70 & _inc_T_70; // @[Decoupled.scala:51:35]
wire _dec_T_140 = d_sel_70 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_70 = _dec_T_140 & _dec_T_141; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_280 = {1'h0, count_70} + {1'h0, inc_70}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_281 = _count_T_280[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_282 = {1'h0, _count_T_281} - {1'h0, dec_70}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_283 = _count_T_282[0]; // @[ToAXI4.scala:278:37]
wire _idStall_70_T = ~idle_70; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_70_T_3 = _idStall_70_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_70 = _idStall_70_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_71; // @[ToAXI4.scala:272:28]
wire _idStall_71_T_2 = count_71; // @[ToAXI4.scala:272:28, :286:44]
reg write_71; // @[ToAXI4.scala:273:24]
wire idle_71 = ~count_71; // @[ToAXI4.scala:272:28, :274:26]
wire inc_71 = a_sel_71 & _inc_T_71; // @[Decoupled.scala:51:35]
wire _dec_T_142 = d_sel_71 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_71 = _dec_T_142 & _dec_T_143; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_284 = {1'h0, count_71} + {1'h0, inc_71}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_285 = _count_T_284[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_286 = {1'h0, _count_T_285} - {1'h0, dec_71}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_287 = _count_T_286[0]; // @[ToAXI4.scala:278:37]
wire _idStall_71_T = ~idle_71; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_71_T_3 = _idStall_71_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_71 = _idStall_71_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_72; // @[ToAXI4.scala:272:28]
wire _idStall_72_T_2 = count_72; // @[ToAXI4.scala:272:28, :286:44]
reg write_72; // @[ToAXI4.scala:273:24]
wire idle_72 = ~count_72; // @[ToAXI4.scala:272:28, :274:26]
wire inc_72 = a_sel_72 & _inc_T_72; // @[Decoupled.scala:51:35]
wire _dec_T_144 = d_sel_72 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_72 = _dec_T_144 & _dec_T_145; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_288 = {1'h0, count_72} + {1'h0, inc_72}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_289 = _count_T_288[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_290 = {1'h0, _count_T_289} - {1'h0, dec_72}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_291 = _count_T_290[0]; // @[ToAXI4.scala:278:37]
wire _idStall_72_T = ~idle_72; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_72_T_3 = _idStall_72_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_72 = _idStall_72_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_73; // @[ToAXI4.scala:272:28]
wire _idStall_73_T_2 = count_73; // @[ToAXI4.scala:272:28, :286:44]
reg write_73; // @[ToAXI4.scala:273:24]
wire idle_73 = ~count_73; // @[ToAXI4.scala:272:28, :274:26]
wire inc_73 = a_sel_73 & _inc_T_73; // @[Decoupled.scala:51:35]
wire _dec_T_146 = d_sel_73 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_73 = _dec_T_146 & _dec_T_147; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_292 = {1'h0, count_73} + {1'h0, inc_73}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_293 = _count_T_292[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_294 = {1'h0, _count_T_293} - {1'h0, dec_73}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_295 = _count_T_294[0]; // @[ToAXI4.scala:278:37]
wire _idStall_73_T = ~idle_73; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_73_T_3 = _idStall_73_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_73 = _idStall_73_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_74; // @[ToAXI4.scala:272:28]
wire _idStall_74_T_2 = count_74; // @[ToAXI4.scala:272:28, :286:44]
reg write_74; // @[ToAXI4.scala:273:24]
wire idle_74 = ~count_74; // @[ToAXI4.scala:272:28, :274:26]
wire inc_74 = a_sel_74 & _inc_T_74; // @[Decoupled.scala:51:35]
wire _dec_T_148 = d_sel_74 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_74 = _dec_T_148 & _dec_T_149; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_296 = {1'h0, count_74} + {1'h0, inc_74}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_297 = _count_T_296[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_298 = {1'h0, _count_T_297} - {1'h0, dec_74}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_299 = _count_T_298[0]; // @[ToAXI4.scala:278:37]
wire _idStall_74_T = ~idle_74; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_74_T_3 = _idStall_74_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_74 = _idStall_74_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_75; // @[ToAXI4.scala:272:28]
wire _idStall_75_T_2 = count_75; // @[ToAXI4.scala:272:28, :286:44]
reg write_75; // @[ToAXI4.scala:273:24]
wire idle_75 = ~count_75; // @[ToAXI4.scala:272:28, :274:26]
wire inc_75 = a_sel_75 & _inc_T_75; // @[Decoupled.scala:51:35]
wire _dec_T_150 = d_sel_75 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_75 = _dec_T_150 & _dec_T_151; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_300 = {1'h0, count_75} + {1'h0, inc_75}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_301 = _count_T_300[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_302 = {1'h0, _count_T_301} - {1'h0, dec_75}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_303 = _count_T_302[0]; // @[ToAXI4.scala:278:37]
wire _idStall_75_T = ~idle_75; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_75_T_3 = _idStall_75_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_75 = _idStall_75_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_76; // @[ToAXI4.scala:272:28]
wire _idStall_76_T_2 = count_76; // @[ToAXI4.scala:272:28, :286:44]
reg write_76; // @[ToAXI4.scala:273:24]
wire idle_76 = ~count_76; // @[ToAXI4.scala:272:28, :274:26]
wire inc_76 = a_sel_76 & _inc_T_76; // @[Decoupled.scala:51:35]
wire _dec_T_152 = d_sel_76 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_76 = _dec_T_152 & _dec_T_153; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_304 = {1'h0, count_76} + {1'h0, inc_76}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_305 = _count_T_304[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_306 = {1'h0, _count_T_305} - {1'h0, dec_76}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_307 = _count_T_306[0]; // @[ToAXI4.scala:278:37]
wire _idStall_76_T = ~idle_76; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_76_T_3 = _idStall_76_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_76 = _idStall_76_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_77; // @[ToAXI4.scala:272:28]
wire _idStall_77_T_2 = count_77; // @[ToAXI4.scala:272:28, :286:44]
reg write_77; // @[ToAXI4.scala:273:24]
wire idle_77 = ~count_77; // @[ToAXI4.scala:272:28, :274:26]
wire inc_77 = a_sel_77 & _inc_T_77; // @[Decoupled.scala:51:35]
wire _dec_T_154 = d_sel_77 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_77 = _dec_T_154 & _dec_T_155; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_308 = {1'h0, count_77} + {1'h0, inc_77}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_309 = _count_T_308[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_310 = {1'h0, _count_T_309} - {1'h0, dec_77}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_311 = _count_T_310[0]; // @[ToAXI4.scala:278:37]
wire _idStall_77_T = ~idle_77; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_77_T_3 = _idStall_77_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_77 = _idStall_77_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_78; // @[ToAXI4.scala:272:28]
wire _idStall_78_T_2 = count_78; // @[ToAXI4.scala:272:28, :286:44]
reg write_78; // @[ToAXI4.scala:273:24]
wire idle_78 = ~count_78; // @[ToAXI4.scala:272:28, :274:26]
wire inc_78 = a_sel_78 & _inc_T_78; // @[Decoupled.scala:51:35]
wire _dec_T_156 = d_sel_78 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_78 = _dec_T_156 & _dec_T_157; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_312 = {1'h0, count_78} + {1'h0, inc_78}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_313 = _count_T_312[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_314 = {1'h0, _count_T_313} - {1'h0, dec_78}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_315 = _count_T_314[0]; // @[ToAXI4.scala:278:37]
wire _idStall_78_T = ~idle_78; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_78_T_3 = _idStall_78_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_78 = _idStall_78_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_79; // @[ToAXI4.scala:272:28]
wire _idStall_79_T_2 = count_79; // @[ToAXI4.scala:272:28, :286:44]
reg write_79; // @[ToAXI4.scala:273:24]
wire idle_79 = ~count_79; // @[ToAXI4.scala:272:28, :274:26]
wire inc_79 = a_sel_79 & _inc_T_79; // @[Decoupled.scala:51:35]
wire _dec_T_158 = d_sel_79 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_79 = _dec_T_158 & _dec_T_159; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_316 = {1'h0, count_79} + {1'h0, inc_79}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_317 = _count_T_316[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_318 = {1'h0, _count_T_317} - {1'h0, dec_79}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_319 = _count_T_318[0]; // @[ToAXI4.scala:278:37]
wire _idStall_79_T = ~idle_79; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_79_T_3 = _idStall_79_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_79 = _idStall_79_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_80; // @[ToAXI4.scala:272:28]
wire _idStall_80_T_2 = count_80; // @[ToAXI4.scala:272:28, :286:44]
reg write_80; // @[ToAXI4.scala:273:24]
wire idle_80 = ~count_80; // @[ToAXI4.scala:272:28, :274:26]
wire inc_80 = a_sel_80 & _inc_T_80; // @[Decoupled.scala:51:35]
wire _dec_T_160 = d_sel_80 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_80 = _dec_T_160 & _dec_T_161; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_320 = {1'h0, count_80} + {1'h0, inc_80}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_321 = _count_T_320[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_322 = {1'h0, _count_T_321} - {1'h0, dec_80}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_323 = _count_T_322[0]; // @[ToAXI4.scala:278:37]
wire _idStall_80_T = ~idle_80; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_80_T_3 = _idStall_80_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_80 = _idStall_80_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_81; // @[ToAXI4.scala:272:28]
wire _idStall_81_T_2 = count_81; // @[ToAXI4.scala:272:28, :286:44]
reg write_81; // @[ToAXI4.scala:273:24]
wire idle_81 = ~count_81; // @[ToAXI4.scala:272:28, :274:26]
wire inc_81 = a_sel_81 & _inc_T_81; // @[Decoupled.scala:51:35]
wire _dec_T_162 = d_sel_81 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_81 = _dec_T_162 & _dec_T_163; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_324 = {1'h0, count_81} + {1'h0, inc_81}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_325 = _count_T_324[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_326 = {1'h0, _count_T_325} - {1'h0, dec_81}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_327 = _count_T_326[0]; // @[ToAXI4.scala:278:37]
wire _idStall_81_T = ~idle_81; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_81_T_3 = _idStall_81_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_81 = _idStall_81_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_82; // @[ToAXI4.scala:272:28]
wire _idStall_82_T_2 = count_82; // @[ToAXI4.scala:272:28, :286:44]
reg write_82; // @[ToAXI4.scala:273:24]
wire idle_82 = ~count_82; // @[ToAXI4.scala:272:28, :274:26]
wire inc_82 = a_sel_82 & _inc_T_82; // @[Decoupled.scala:51:35]
wire _dec_T_164 = d_sel_82 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_82 = _dec_T_164 & _dec_T_165; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_328 = {1'h0, count_82} + {1'h0, inc_82}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_329 = _count_T_328[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_330 = {1'h0, _count_T_329} - {1'h0, dec_82}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_331 = _count_T_330[0]; // @[ToAXI4.scala:278:37]
wire _idStall_82_T = ~idle_82; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_82_T_3 = _idStall_82_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_82 = _idStall_82_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_83; // @[ToAXI4.scala:272:28]
wire _idStall_83_T_2 = count_83; // @[ToAXI4.scala:272:28, :286:44]
reg write_83; // @[ToAXI4.scala:273:24]
wire idle_83 = ~count_83; // @[ToAXI4.scala:272:28, :274:26]
wire inc_83 = a_sel_83 & _inc_T_83; // @[Decoupled.scala:51:35]
wire _dec_T_166 = d_sel_83 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_83 = _dec_T_166 & _dec_T_167; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_332 = {1'h0, count_83} + {1'h0, inc_83}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_333 = _count_T_332[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_334 = {1'h0, _count_T_333} - {1'h0, dec_83}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_335 = _count_T_334[0]; // @[ToAXI4.scala:278:37]
wire _idStall_83_T = ~idle_83; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_83_T_3 = _idStall_83_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_83 = _idStall_83_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_84; // @[ToAXI4.scala:272:28]
wire _idStall_84_T_2 = count_84; // @[ToAXI4.scala:272:28, :286:44]
reg write_84; // @[ToAXI4.scala:273:24]
wire idle_84 = ~count_84; // @[ToAXI4.scala:272:28, :274:26]
wire inc_84 = a_sel_84 & _inc_T_84; // @[Decoupled.scala:51:35]
wire _dec_T_168 = d_sel_84 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_84 = _dec_T_168 & _dec_T_169; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_336 = {1'h0, count_84} + {1'h0, inc_84}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_337 = _count_T_336[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_338 = {1'h0, _count_T_337} - {1'h0, dec_84}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_339 = _count_T_338[0]; // @[ToAXI4.scala:278:37]
wire _idStall_84_T = ~idle_84; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_84_T_3 = _idStall_84_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_84 = _idStall_84_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_85; // @[ToAXI4.scala:272:28]
wire _idStall_85_T_2 = count_85; // @[ToAXI4.scala:272:28, :286:44]
reg write_85; // @[ToAXI4.scala:273:24]
wire idle_85 = ~count_85; // @[ToAXI4.scala:272:28, :274:26]
wire inc_85 = a_sel_85 & _inc_T_85; // @[Decoupled.scala:51:35]
wire _dec_T_170 = d_sel_85 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_85 = _dec_T_170 & _dec_T_171; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_340 = {1'h0, count_85} + {1'h0, inc_85}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_341 = _count_T_340[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_342 = {1'h0, _count_T_341} - {1'h0, dec_85}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_343 = _count_T_342[0]; // @[ToAXI4.scala:278:37]
wire _idStall_85_T = ~idle_85; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_85_T_3 = _idStall_85_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_85 = _idStall_85_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_86; // @[ToAXI4.scala:272:28]
wire _idStall_86_T_2 = count_86; // @[ToAXI4.scala:272:28, :286:44]
reg write_86; // @[ToAXI4.scala:273:24]
wire idle_86 = ~count_86; // @[ToAXI4.scala:272:28, :274:26]
wire inc_86 = a_sel_86 & _inc_T_86; // @[Decoupled.scala:51:35]
wire _dec_T_172 = d_sel_86 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_86 = _dec_T_172 & _dec_T_173; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_344 = {1'h0, count_86} + {1'h0, inc_86}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_345 = _count_T_344[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_346 = {1'h0, _count_T_345} - {1'h0, dec_86}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_347 = _count_T_346[0]; // @[ToAXI4.scala:278:37]
wire _idStall_86_T = ~idle_86; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_86_T_3 = _idStall_86_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_86 = _idStall_86_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_87; // @[ToAXI4.scala:272:28]
wire _idStall_87_T_2 = count_87; // @[ToAXI4.scala:272:28, :286:44]
reg write_87; // @[ToAXI4.scala:273:24]
wire idle_87 = ~count_87; // @[ToAXI4.scala:272:28, :274:26]
wire inc_87 = a_sel_87 & _inc_T_87; // @[Decoupled.scala:51:35]
wire _dec_T_174 = d_sel_87 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_87 = _dec_T_174 & _dec_T_175; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_348 = {1'h0, count_87} + {1'h0, inc_87}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_349 = _count_T_348[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_350 = {1'h0, _count_T_349} - {1'h0, dec_87}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_351 = _count_T_350[0]; // @[ToAXI4.scala:278:37]
wire _idStall_87_T = ~idle_87; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_87_T_3 = _idStall_87_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_87 = _idStall_87_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_88; // @[ToAXI4.scala:272:28]
wire _idStall_88_T_2 = count_88; // @[ToAXI4.scala:272:28, :286:44]
reg write_88; // @[ToAXI4.scala:273:24]
wire idle_88 = ~count_88; // @[ToAXI4.scala:272:28, :274:26]
wire inc_88 = a_sel_88 & _inc_T_88; // @[Decoupled.scala:51:35]
wire _dec_T_176 = d_sel_88 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_88 = _dec_T_176 & _dec_T_177; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_352 = {1'h0, count_88} + {1'h0, inc_88}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_353 = _count_T_352[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_354 = {1'h0, _count_T_353} - {1'h0, dec_88}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_355 = _count_T_354[0]; // @[ToAXI4.scala:278:37]
wire _idStall_88_T = ~idle_88; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_88_T_3 = _idStall_88_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_88 = _idStall_88_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_89; // @[ToAXI4.scala:272:28]
wire _idStall_89_T_2 = count_89; // @[ToAXI4.scala:272:28, :286:44]
reg write_89; // @[ToAXI4.scala:273:24]
wire idle_89 = ~count_89; // @[ToAXI4.scala:272:28, :274:26]
wire inc_89 = a_sel_89 & _inc_T_89; // @[Decoupled.scala:51:35]
wire _dec_T_178 = d_sel_89 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_89 = _dec_T_178 & _dec_T_179; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_356 = {1'h0, count_89} + {1'h0, inc_89}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_357 = _count_T_356[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_358 = {1'h0, _count_T_357} - {1'h0, dec_89}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_359 = _count_T_358[0]; // @[ToAXI4.scala:278:37]
wire _idStall_89_T = ~idle_89; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_89_T_3 = _idStall_89_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_89 = _idStall_89_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_90; // @[ToAXI4.scala:272:28]
wire _idStall_90_T_2 = count_90; // @[ToAXI4.scala:272:28, :286:44]
reg write_90; // @[ToAXI4.scala:273:24]
wire idle_90 = ~count_90; // @[ToAXI4.scala:272:28, :274:26]
wire inc_90 = a_sel_90 & _inc_T_90; // @[Decoupled.scala:51:35]
wire _dec_T_180 = d_sel_90 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_90 = _dec_T_180 & _dec_T_181; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_360 = {1'h0, count_90} + {1'h0, inc_90}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_361 = _count_T_360[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_362 = {1'h0, _count_T_361} - {1'h0, dec_90}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_363 = _count_T_362[0]; // @[ToAXI4.scala:278:37]
wire _idStall_90_T = ~idle_90; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_90_T_3 = _idStall_90_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_90 = _idStall_90_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_91; // @[ToAXI4.scala:272:28]
wire _idStall_91_T_2 = count_91; // @[ToAXI4.scala:272:28, :286:44]
reg write_91; // @[ToAXI4.scala:273:24]
wire idle_91 = ~count_91; // @[ToAXI4.scala:272:28, :274:26]
wire inc_91 = a_sel_91 & _inc_T_91; // @[Decoupled.scala:51:35]
wire _dec_T_182 = d_sel_91 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_91 = _dec_T_182 & _dec_T_183; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_364 = {1'h0, count_91} + {1'h0, inc_91}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_365 = _count_T_364[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_366 = {1'h0, _count_T_365} - {1'h0, dec_91}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_367 = _count_T_366[0]; // @[ToAXI4.scala:278:37]
wire _idStall_91_T = ~idle_91; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_91_T_3 = _idStall_91_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_91 = _idStall_91_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_92; // @[ToAXI4.scala:272:28]
wire _idStall_92_T_2 = count_92; // @[ToAXI4.scala:272:28, :286:44]
reg write_92; // @[ToAXI4.scala:273:24]
wire idle_92 = ~count_92; // @[ToAXI4.scala:272:28, :274:26]
wire inc_92 = a_sel_92 & _inc_T_92; // @[Decoupled.scala:51:35]
wire _dec_T_184 = d_sel_92 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_92 = _dec_T_184 & _dec_T_185; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_368 = {1'h0, count_92} + {1'h0, inc_92}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_369 = _count_T_368[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_370 = {1'h0, _count_T_369} - {1'h0, dec_92}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_371 = _count_T_370[0]; // @[ToAXI4.scala:278:37]
wire _idStall_92_T = ~idle_92; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_92_T_3 = _idStall_92_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_92 = _idStall_92_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_93; // @[ToAXI4.scala:272:28]
wire _idStall_93_T_2 = count_93; // @[ToAXI4.scala:272:28, :286:44]
reg write_93; // @[ToAXI4.scala:273:24]
wire idle_93 = ~count_93; // @[ToAXI4.scala:272:28, :274:26]
wire inc_93 = a_sel_93 & _inc_T_93; // @[Decoupled.scala:51:35]
wire _dec_T_186 = d_sel_93 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_93 = _dec_T_186 & _dec_T_187; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_372 = {1'h0, count_93} + {1'h0, inc_93}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_373 = _count_T_372[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_374 = {1'h0, _count_T_373} - {1'h0, dec_93}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_375 = _count_T_374[0]; // @[ToAXI4.scala:278:37]
wire _idStall_93_T = ~idle_93; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_93_T_3 = _idStall_93_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_93 = _idStall_93_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_94; // @[ToAXI4.scala:272:28]
wire _idStall_94_T_2 = count_94; // @[ToAXI4.scala:272:28, :286:44]
reg write_94; // @[ToAXI4.scala:273:24]
wire idle_94 = ~count_94; // @[ToAXI4.scala:272:28, :274:26]
wire inc_94 = a_sel_94 & _inc_T_94; // @[Decoupled.scala:51:35]
wire _dec_T_188 = d_sel_94 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_94 = _dec_T_188 & _dec_T_189; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_376 = {1'h0, count_94} + {1'h0, inc_94}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_377 = _count_T_376[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_378 = {1'h0, _count_T_377} - {1'h0, dec_94}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_379 = _count_T_378[0]; // @[ToAXI4.scala:278:37]
wire _idStall_94_T = ~idle_94; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_94_T_3 = _idStall_94_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_94 = _idStall_94_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_95; // @[ToAXI4.scala:272:28]
wire _idStall_95_T_2 = count_95; // @[ToAXI4.scala:272:28, :286:44]
reg write_95; // @[ToAXI4.scala:273:24]
wire idle_95 = ~count_95; // @[ToAXI4.scala:272:28, :274:26]
wire inc_95 = a_sel_95 & _inc_T_95; // @[Decoupled.scala:51:35]
wire _dec_T_190 = d_sel_95 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_95 = _dec_T_190 & _dec_T_191; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_380 = {1'h0, count_95} + {1'h0, inc_95}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_381 = _count_T_380[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_382 = {1'h0, _count_T_381} - {1'h0, dec_95}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_383 = _count_T_382[0]; // @[ToAXI4.scala:278:37]
wire _idStall_95_T = ~idle_95; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_95_T_3 = _idStall_95_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_95 = _idStall_95_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_96; // @[ToAXI4.scala:272:28]
wire _idStall_96_T_2 = count_96; // @[ToAXI4.scala:272:28, :286:44]
reg write_96; // @[ToAXI4.scala:273:24]
wire idle_96 = ~count_96; // @[ToAXI4.scala:272:28, :274:26]
wire inc_96 = a_sel_96 & _inc_T_96; // @[Decoupled.scala:51:35]
wire _dec_T_192 = d_sel_96 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_96 = _dec_T_192 & _dec_T_193; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_384 = {1'h0, count_96} + {1'h0, inc_96}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_385 = _count_T_384[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_386 = {1'h0, _count_T_385} - {1'h0, dec_96}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_387 = _count_T_386[0]; // @[ToAXI4.scala:278:37]
wire _idStall_96_T = ~idle_96; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_96_T_3 = _idStall_96_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_96 = _idStall_96_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_97; // @[ToAXI4.scala:272:28]
wire _idStall_97_T_2 = count_97; // @[ToAXI4.scala:272:28, :286:44]
reg write_97; // @[ToAXI4.scala:273:24]
wire idle_97 = ~count_97; // @[ToAXI4.scala:272:28, :274:26]
wire inc_97 = a_sel_97 & _inc_T_97; // @[Decoupled.scala:51:35]
wire _dec_T_194 = d_sel_97 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_97 = _dec_T_194 & _dec_T_195; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_388 = {1'h0, count_97} + {1'h0, inc_97}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_389 = _count_T_388[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_390 = {1'h0, _count_T_389} - {1'h0, dec_97}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_391 = _count_T_390[0]; // @[ToAXI4.scala:278:37]
wire _idStall_97_T = ~idle_97; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_97_T_3 = _idStall_97_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_97 = _idStall_97_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_98; // @[ToAXI4.scala:272:28]
wire _idStall_98_T_2 = count_98; // @[ToAXI4.scala:272:28, :286:44]
reg write_98; // @[ToAXI4.scala:273:24]
wire idle_98 = ~count_98; // @[ToAXI4.scala:272:28, :274:26]
wire inc_98 = a_sel_98 & _inc_T_98; // @[Decoupled.scala:51:35]
wire _dec_T_196 = d_sel_98 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_98 = _dec_T_196 & _dec_T_197; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_392 = {1'h0, count_98} + {1'h0, inc_98}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_393 = _count_T_392[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_394 = {1'h0, _count_T_393} - {1'h0, dec_98}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_395 = _count_T_394[0]; // @[ToAXI4.scala:278:37]
wire _idStall_98_T = ~idle_98; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_98_T_3 = _idStall_98_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_98 = _idStall_98_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_99; // @[ToAXI4.scala:272:28]
wire _idStall_99_T_2 = count_99; // @[ToAXI4.scala:272:28, :286:44]
reg write_99; // @[ToAXI4.scala:273:24]
wire idle_99 = ~count_99; // @[ToAXI4.scala:272:28, :274:26]
wire inc_99 = a_sel_99 & _inc_T_99; // @[Decoupled.scala:51:35]
wire _dec_T_198 = d_sel_99 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_99 = _dec_T_198 & _dec_T_199; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_396 = {1'h0, count_99} + {1'h0, inc_99}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_397 = _count_T_396[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_398 = {1'h0, _count_T_397} - {1'h0, dec_99}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_399 = _count_T_398[0]; // @[ToAXI4.scala:278:37]
wire _idStall_99_T = ~idle_99; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_99_T_3 = _idStall_99_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_99 = _idStall_99_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_100; // @[ToAXI4.scala:272:28]
wire _idStall_100_T_2 = count_100; // @[ToAXI4.scala:272:28, :286:44]
reg write_100; // @[ToAXI4.scala:273:24]
wire idle_100 = ~count_100; // @[ToAXI4.scala:272:28, :274:26]
wire inc_100 = a_sel_100 & _inc_T_100; // @[Decoupled.scala:51:35]
wire _dec_T_200 = d_sel_100 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_100 = _dec_T_200 & _dec_T_201; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_400 = {1'h0, count_100} + {1'h0, inc_100}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_401 = _count_T_400[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_402 = {1'h0, _count_T_401} - {1'h0, dec_100}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_403 = _count_T_402[0]; // @[ToAXI4.scala:278:37]
wire _idStall_100_T = ~idle_100; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_100_T_3 = _idStall_100_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_100 = _idStall_100_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_101; // @[ToAXI4.scala:272:28]
wire _idStall_101_T_2 = count_101; // @[ToAXI4.scala:272:28, :286:44]
reg write_101; // @[ToAXI4.scala:273:24]
wire idle_101 = ~count_101; // @[ToAXI4.scala:272:28, :274:26]
wire inc_101 = a_sel_101 & _inc_T_101; // @[Decoupled.scala:51:35]
wire _dec_T_202 = d_sel_101 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_101 = _dec_T_202 & _dec_T_203; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_404 = {1'h0, count_101} + {1'h0, inc_101}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_405 = _count_T_404[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_406 = {1'h0, _count_T_405} - {1'h0, dec_101}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_407 = _count_T_406[0]; // @[ToAXI4.scala:278:37]
wire _idStall_101_T = ~idle_101; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_101_T_3 = _idStall_101_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_101 = _idStall_101_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_102; // @[ToAXI4.scala:272:28]
wire _idStall_102_T_2 = count_102; // @[ToAXI4.scala:272:28, :286:44]
reg write_102; // @[ToAXI4.scala:273:24]
wire idle_102 = ~count_102; // @[ToAXI4.scala:272:28, :274:26]
wire inc_102 = a_sel_102 & _inc_T_102; // @[Decoupled.scala:51:35]
wire _dec_T_204 = d_sel_102 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_102 = _dec_T_204 & _dec_T_205; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_408 = {1'h0, count_102} + {1'h0, inc_102}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_409 = _count_T_408[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_410 = {1'h0, _count_T_409} - {1'h0, dec_102}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_411 = _count_T_410[0]; // @[ToAXI4.scala:278:37]
wire _idStall_102_T = ~idle_102; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_102_T_3 = _idStall_102_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_102 = _idStall_102_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_103; // @[ToAXI4.scala:272:28]
wire _idStall_103_T_2 = count_103; // @[ToAXI4.scala:272:28, :286:44]
reg write_103; // @[ToAXI4.scala:273:24]
wire idle_103 = ~count_103; // @[ToAXI4.scala:272:28, :274:26]
wire inc_103 = a_sel_103 & _inc_T_103; // @[Decoupled.scala:51:35]
wire _dec_T_206 = d_sel_103 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_103 = _dec_T_206 & _dec_T_207; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_412 = {1'h0, count_103} + {1'h0, inc_103}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_413 = _count_T_412[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_414 = {1'h0, _count_T_413} - {1'h0, dec_103}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_415 = _count_T_414[0]; // @[ToAXI4.scala:278:37]
wire _idStall_103_T = ~idle_103; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_103_T_3 = _idStall_103_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_103 = _idStall_103_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_104; // @[ToAXI4.scala:272:28]
wire _idStall_104_T_2 = count_104; // @[ToAXI4.scala:272:28, :286:44]
reg write_104; // @[ToAXI4.scala:273:24]
wire idle_104 = ~count_104; // @[ToAXI4.scala:272:28, :274:26]
wire inc_104 = a_sel_104 & _inc_T_104; // @[Decoupled.scala:51:35]
wire _dec_T_208 = d_sel_104 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_104 = _dec_T_208 & _dec_T_209; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_416 = {1'h0, count_104} + {1'h0, inc_104}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_417 = _count_T_416[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_418 = {1'h0, _count_T_417} - {1'h0, dec_104}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_419 = _count_T_418[0]; // @[ToAXI4.scala:278:37]
wire _idStall_104_T = ~idle_104; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_104_T_3 = _idStall_104_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_104 = _idStall_104_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_105; // @[ToAXI4.scala:272:28]
wire _idStall_105_T_2 = count_105; // @[ToAXI4.scala:272:28, :286:44]
reg write_105; // @[ToAXI4.scala:273:24]
wire idle_105 = ~count_105; // @[ToAXI4.scala:272:28, :274:26]
wire inc_105 = a_sel_105 & _inc_T_105; // @[Decoupled.scala:51:35]
wire _dec_T_210 = d_sel_105 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_105 = _dec_T_210 & _dec_T_211; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_420 = {1'h0, count_105} + {1'h0, inc_105}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_421 = _count_T_420[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_422 = {1'h0, _count_T_421} - {1'h0, dec_105}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_423 = _count_T_422[0]; // @[ToAXI4.scala:278:37]
wire _idStall_105_T = ~idle_105; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_105_T_3 = _idStall_105_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_105 = _idStall_105_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_106; // @[ToAXI4.scala:272:28]
wire _idStall_106_T_2 = count_106; // @[ToAXI4.scala:272:28, :286:44]
reg write_106; // @[ToAXI4.scala:273:24]
wire idle_106 = ~count_106; // @[ToAXI4.scala:272:28, :274:26]
wire inc_106 = a_sel_106 & _inc_T_106; // @[Decoupled.scala:51:35]
wire _dec_T_212 = d_sel_106 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_106 = _dec_T_212 & _dec_T_213; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_424 = {1'h0, count_106} + {1'h0, inc_106}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_425 = _count_T_424[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_426 = {1'h0, _count_T_425} - {1'h0, dec_106}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_427 = _count_T_426[0]; // @[ToAXI4.scala:278:37]
wire _idStall_106_T = ~idle_106; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_106_T_3 = _idStall_106_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_106 = _idStall_106_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_107; // @[ToAXI4.scala:272:28]
wire _idStall_107_T_2 = count_107; // @[ToAXI4.scala:272:28, :286:44]
reg write_107; // @[ToAXI4.scala:273:24]
wire idle_107 = ~count_107; // @[ToAXI4.scala:272:28, :274:26]
wire inc_107 = a_sel_107 & _inc_T_107; // @[Decoupled.scala:51:35]
wire _dec_T_214 = d_sel_107 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_107 = _dec_T_214 & _dec_T_215; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_428 = {1'h0, count_107} + {1'h0, inc_107}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_429 = _count_T_428[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_430 = {1'h0, _count_T_429} - {1'h0, dec_107}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_431 = _count_T_430[0]; // @[ToAXI4.scala:278:37]
wire _idStall_107_T = ~idle_107; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_107_T_3 = _idStall_107_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_107 = _idStall_107_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_108; // @[ToAXI4.scala:272:28]
wire _idStall_108_T_2 = count_108; // @[ToAXI4.scala:272:28, :286:44]
reg write_108; // @[ToAXI4.scala:273:24]
wire idle_108 = ~count_108; // @[ToAXI4.scala:272:28, :274:26]
wire inc_108 = a_sel_108 & _inc_T_108; // @[Decoupled.scala:51:35]
wire _dec_T_216 = d_sel_108 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_108 = _dec_T_216 & _dec_T_217; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_432 = {1'h0, count_108} + {1'h0, inc_108}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_433 = _count_T_432[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_434 = {1'h0, _count_T_433} - {1'h0, dec_108}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_435 = _count_T_434[0]; // @[ToAXI4.scala:278:37]
wire _idStall_108_T = ~idle_108; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_108_T_3 = _idStall_108_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_108 = _idStall_108_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_109; // @[ToAXI4.scala:272:28]
wire _idStall_109_T_2 = count_109; // @[ToAXI4.scala:272:28, :286:44]
reg write_109; // @[ToAXI4.scala:273:24]
wire idle_109 = ~count_109; // @[ToAXI4.scala:272:28, :274:26]
wire inc_109 = a_sel_109 & _inc_T_109; // @[Decoupled.scala:51:35]
wire _dec_T_218 = d_sel_109 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_109 = _dec_T_218 & _dec_T_219; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_436 = {1'h0, count_109} + {1'h0, inc_109}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_437 = _count_T_436[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_438 = {1'h0, _count_T_437} - {1'h0, dec_109}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_439 = _count_T_438[0]; // @[ToAXI4.scala:278:37]
wire _idStall_109_T = ~idle_109; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_109_T_3 = _idStall_109_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_109 = _idStall_109_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_110; // @[ToAXI4.scala:272:28]
wire _idStall_110_T_2 = count_110; // @[ToAXI4.scala:272:28, :286:44]
reg write_110; // @[ToAXI4.scala:273:24]
wire idle_110 = ~count_110; // @[ToAXI4.scala:272:28, :274:26]
wire inc_110 = a_sel_110 & _inc_T_110; // @[Decoupled.scala:51:35]
wire _dec_T_220 = d_sel_110 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_110 = _dec_T_220 & _dec_T_221; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_440 = {1'h0, count_110} + {1'h0, inc_110}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_441 = _count_T_440[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_442 = {1'h0, _count_T_441} - {1'h0, dec_110}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_443 = _count_T_442[0]; // @[ToAXI4.scala:278:37]
wire _idStall_110_T = ~idle_110; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_110_T_3 = _idStall_110_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_110 = _idStall_110_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_111; // @[ToAXI4.scala:272:28]
wire _idStall_111_T_2 = count_111; // @[ToAXI4.scala:272:28, :286:44]
reg write_111; // @[ToAXI4.scala:273:24]
wire idle_111 = ~count_111; // @[ToAXI4.scala:272:28, :274:26]
wire inc_111 = a_sel_111 & _inc_T_111; // @[Decoupled.scala:51:35]
wire _dec_T_222 = d_sel_111 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_111 = _dec_T_222 & _dec_T_223; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_444 = {1'h0, count_111} + {1'h0, inc_111}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_445 = _count_T_444[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_446 = {1'h0, _count_T_445} - {1'h0, dec_111}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_447 = _count_T_446[0]; // @[ToAXI4.scala:278:37]
wire _idStall_111_T = ~idle_111; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_111_T_3 = _idStall_111_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_111 = _idStall_111_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_112; // @[ToAXI4.scala:272:28]
wire _idStall_112_T_2 = count_112; // @[ToAXI4.scala:272:28, :286:44]
reg write_112; // @[ToAXI4.scala:273:24]
wire idle_112 = ~count_112; // @[ToAXI4.scala:272:28, :274:26]
wire inc_112 = a_sel_112 & _inc_T_112; // @[Decoupled.scala:51:35]
wire _dec_T_224 = d_sel_112 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_112 = _dec_T_224 & _dec_T_225; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_448 = {1'h0, count_112} + {1'h0, inc_112}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_449 = _count_T_448[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_450 = {1'h0, _count_T_449} - {1'h0, dec_112}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_451 = _count_T_450[0]; // @[ToAXI4.scala:278:37]
wire _idStall_112_T = ~idle_112; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_112_T_3 = _idStall_112_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_112 = _idStall_112_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_113; // @[ToAXI4.scala:272:28]
wire _idStall_113_T_2 = count_113; // @[ToAXI4.scala:272:28, :286:44]
reg write_113; // @[ToAXI4.scala:273:24]
wire idle_113 = ~count_113; // @[ToAXI4.scala:272:28, :274:26]
wire inc_113 = a_sel_113 & _inc_T_113; // @[Decoupled.scala:51:35]
wire _dec_T_226 = d_sel_113 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_113 = _dec_T_226 & _dec_T_227; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_452 = {1'h0, count_113} + {1'h0, inc_113}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_453 = _count_T_452[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_454 = {1'h0, _count_T_453} - {1'h0, dec_113}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_455 = _count_T_454[0]; // @[ToAXI4.scala:278:37]
wire _idStall_113_T = ~idle_113; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_113_T_3 = _idStall_113_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_113 = _idStall_113_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_114; // @[ToAXI4.scala:272:28]
wire _idStall_114_T_2 = count_114; // @[ToAXI4.scala:272:28, :286:44]
reg write_114; // @[ToAXI4.scala:273:24]
wire idle_114 = ~count_114; // @[ToAXI4.scala:272:28, :274:26]
wire inc_114 = a_sel_114 & _inc_T_114; // @[Decoupled.scala:51:35]
wire _dec_T_228 = d_sel_114 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_114 = _dec_T_228 & _dec_T_229; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_456 = {1'h0, count_114} + {1'h0, inc_114}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_457 = _count_T_456[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_458 = {1'h0, _count_T_457} - {1'h0, dec_114}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_459 = _count_T_458[0]; // @[ToAXI4.scala:278:37]
wire _idStall_114_T = ~idle_114; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_114_T_3 = _idStall_114_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_114 = _idStall_114_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_115; // @[ToAXI4.scala:272:28]
wire _idStall_115_T_2 = count_115; // @[ToAXI4.scala:272:28, :286:44]
reg write_115; // @[ToAXI4.scala:273:24]
wire idle_115 = ~count_115; // @[ToAXI4.scala:272:28, :274:26]
wire inc_115 = a_sel_115 & _inc_T_115; // @[Decoupled.scala:51:35]
wire _dec_T_230 = d_sel_115 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_115 = _dec_T_230 & _dec_T_231; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_460 = {1'h0, count_115} + {1'h0, inc_115}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_461 = _count_T_460[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_462 = {1'h0, _count_T_461} - {1'h0, dec_115}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_463 = _count_T_462[0]; // @[ToAXI4.scala:278:37]
wire _idStall_115_T = ~idle_115; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_115_T_3 = _idStall_115_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_115 = _idStall_115_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_116; // @[ToAXI4.scala:272:28]
wire _idStall_116_T_2 = count_116; // @[ToAXI4.scala:272:28, :286:44]
reg write_116; // @[ToAXI4.scala:273:24]
wire idle_116 = ~count_116; // @[ToAXI4.scala:272:28, :274:26]
wire inc_116 = a_sel_116 & _inc_T_116; // @[Decoupled.scala:51:35]
wire _dec_T_232 = d_sel_116 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_116 = _dec_T_232 & _dec_T_233; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_464 = {1'h0, count_116} + {1'h0, inc_116}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_465 = _count_T_464[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_466 = {1'h0, _count_T_465} - {1'h0, dec_116}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_467 = _count_T_466[0]; // @[ToAXI4.scala:278:37]
wire _idStall_116_T = ~idle_116; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_116_T_3 = _idStall_116_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_116 = _idStall_116_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_117; // @[ToAXI4.scala:272:28]
wire _idStall_117_T_2 = count_117; // @[ToAXI4.scala:272:28, :286:44]
reg write_117; // @[ToAXI4.scala:273:24]
wire idle_117 = ~count_117; // @[ToAXI4.scala:272:28, :274:26]
wire inc_117 = a_sel_117 & _inc_T_117; // @[Decoupled.scala:51:35]
wire _dec_T_234 = d_sel_117 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_117 = _dec_T_234 & _dec_T_235; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_468 = {1'h0, count_117} + {1'h0, inc_117}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_469 = _count_T_468[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_470 = {1'h0, _count_T_469} - {1'h0, dec_117}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_471 = _count_T_470[0]; // @[ToAXI4.scala:278:37]
wire _idStall_117_T = ~idle_117; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_117_T_3 = _idStall_117_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_117 = _idStall_117_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_118; // @[ToAXI4.scala:272:28]
wire _idStall_118_T_2 = count_118; // @[ToAXI4.scala:272:28, :286:44]
reg write_118; // @[ToAXI4.scala:273:24]
wire idle_118 = ~count_118; // @[ToAXI4.scala:272:28, :274:26]
wire inc_118 = a_sel_118 & _inc_T_118; // @[Decoupled.scala:51:35]
wire _dec_T_236 = d_sel_118 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_118 = _dec_T_236 & _dec_T_237; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_472 = {1'h0, count_118} + {1'h0, inc_118}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_473 = _count_T_472[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_474 = {1'h0, _count_T_473} - {1'h0, dec_118}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_475 = _count_T_474[0]; // @[ToAXI4.scala:278:37]
wire _idStall_118_T = ~idle_118; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_118_T_3 = _idStall_118_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_118 = _idStall_118_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_119; // @[ToAXI4.scala:272:28]
wire _idStall_119_T_2 = count_119; // @[ToAXI4.scala:272:28, :286:44]
reg write_119; // @[ToAXI4.scala:273:24]
wire idle_119 = ~count_119; // @[ToAXI4.scala:272:28, :274:26]
wire inc_119 = a_sel_119 & _inc_T_119; // @[Decoupled.scala:51:35]
wire _dec_T_238 = d_sel_119 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_119 = _dec_T_238 & _dec_T_239; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_476 = {1'h0, count_119} + {1'h0, inc_119}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_477 = _count_T_476[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_478 = {1'h0, _count_T_477} - {1'h0, dec_119}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_479 = _count_T_478[0]; // @[ToAXI4.scala:278:37]
wire _idStall_119_T = ~idle_119; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_119_T_3 = _idStall_119_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_119 = _idStall_119_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_120; // @[ToAXI4.scala:272:28]
wire _idStall_120_T_2 = count_120; // @[ToAXI4.scala:272:28, :286:44]
reg write_120; // @[ToAXI4.scala:273:24]
wire idle_120 = ~count_120; // @[ToAXI4.scala:272:28, :274:26]
wire inc_120 = a_sel_120 & _inc_T_120; // @[Decoupled.scala:51:35]
wire _dec_T_240 = d_sel_120 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_120 = _dec_T_240 & _dec_T_241; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_480 = {1'h0, count_120} + {1'h0, inc_120}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_481 = _count_T_480[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_482 = {1'h0, _count_T_481} - {1'h0, dec_120}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_483 = _count_T_482[0]; // @[ToAXI4.scala:278:37]
wire _idStall_120_T = ~idle_120; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_120_T_3 = _idStall_120_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_120 = _idStall_120_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_121; // @[ToAXI4.scala:272:28]
wire _idStall_121_T_2 = count_121; // @[ToAXI4.scala:272:28, :286:44]
reg write_121; // @[ToAXI4.scala:273:24]
wire idle_121 = ~count_121; // @[ToAXI4.scala:272:28, :274:26]
wire inc_121 = a_sel_121 & _inc_T_121; // @[Decoupled.scala:51:35]
wire _dec_T_242 = d_sel_121 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_121 = _dec_T_242 & _dec_T_243; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_484 = {1'h0, count_121} + {1'h0, inc_121}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_485 = _count_T_484[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_486 = {1'h0, _count_T_485} - {1'h0, dec_121}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_487 = _count_T_486[0]; // @[ToAXI4.scala:278:37]
wire _idStall_121_T = ~idle_121; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_121_T_3 = _idStall_121_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_121 = _idStall_121_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_122; // @[ToAXI4.scala:272:28]
wire _idStall_122_T_2 = count_122; // @[ToAXI4.scala:272:28, :286:44]
reg write_122; // @[ToAXI4.scala:273:24]
wire idle_122 = ~count_122; // @[ToAXI4.scala:272:28, :274:26]
wire inc_122 = a_sel_122 & _inc_T_122; // @[Decoupled.scala:51:35]
wire _dec_T_244 = d_sel_122 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_122 = _dec_T_244 & _dec_T_245; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_488 = {1'h0, count_122} + {1'h0, inc_122}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_489 = _count_T_488[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_490 = {1'h0, _count_T_489} - {1'h0, dec_122}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_491 = _count_T_490[0]; // @[ToAXI4.scala:278:37]
wire _idStall_122_T = ~idle_122; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_122_T_3 = _idStall_122_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_122 = _idStall_122_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_123; // @[ToAXI4.scala:272:28]
wire _idStall_123_T_2 = count_123; // @[ToAXI4.scala:272:28, :286:44]
reg write_123; // @[ToAXI4.scala:273:24]
wire idle_123 = ~count_123; // @[ToAXI4.scala:272:28, :274:26]
wire inc_123 = a_sel_123 & _inc_T_123; // @[Decoupled.scala:51:35]
wire _dec_T_246 = d_sel_123 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_123 = _dec_T_246 & _dec_T_247; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_492 = {1'h0, count_123} + {1'h0, inc_123}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_493 = _count_T_492[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_494 = {1'h0, _count_T_493} - {1'h0, dec_123}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_495 = _count_T_494[0]; // @[ToAXI4.scala:278:37]
wire _idStall_123_T = ~idle_123; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_123_T_3 = _idStall_123_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_123 = _idStall_123_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_124; // @[ToAXI4.scala:272:28]
wire _idStall_124_T_2 = count_124; // @[ToAXI4.scala:272:28, :286:44]
reg write_124; // @[ToAXI4.scala:273:24]
wire idle_124 = ~count_124; // @[ToAXI4.scala:272:28, :274:26]
wire inc_124 = a_sel_124 & _inc_T_124; // @[Decoupled.scala:51:35]
wire _dec_T_248 = d_sel_124 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_124 = _dec_T_248 & _dec_T_249; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_496 = {1'h0, count_124} + {1'h0, inc_124}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_497 = _count_T_496[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_498 = {1'h0, _count_T_497} - {1'h0, dec_124}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_499 = _count_T_498[0]; // @[ToAXI4.scala:278:37]
wire _idStall_124_T = ~idle_124; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_124_T_3 = _idStall_124_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_124 = _idStall_124_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_125; // @[ToAXI4.scala:272:28]
wire _idStall_125_T_2 = count_125; // @[ToAXI4.scala:272:28, :286:44]
reg write_125; // @[ToAXI4.scala:273:24]
wire idle_125 = ~count_125; // @[ToAXI4.scala:272:28, :274:26]
wire inc_125 = a_sel_125 & _inc_T_125; // @[Decoupled.scala:51:35]
wire _dec_T_250 = d_sel_125 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_125 = _dec_T_250 & _dec_T_251; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_500 = {1'h0, count_125} + {1'h0, inc_125}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_501 = _count_T_500[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_502 = {1'h0, _count_T_501} - {1'h0, dec_125}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_503 = _count_T_502[0]; // @[ToAXI4.scala:278:37]
wire _idStall_125_T = ~idle_125; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_125_T_3 = _idStall_125_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_125 = _idStall_125_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_126; // @[ToAXI4.scala:272:28]
wire _idStall_126_T_2 = count_126; // @[ToAXI4.scala:272:28, :286:44]
reg write_126; // @[ToAXI4.scala:273:24]
wire idle_126 = ~count_126; // @[ToAXI4.scala:272:28, :274:26]
wire inc_126 = a_sel_126 & _inc_T_126; // @[Decoupled.scala:51:35]
wire _dec_T_252 = d_sel_126 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_126 = _dec_T_252 & _dec_T_253; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_504 = {1'h0, count_126} + {1'h0, inc_126}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_505 = _count_T_504[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_506 = {1'h0, _count_T_505} - {1'h0, dec_126}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_507 = _count_T_506[0]; // @[ToAXI4.scala:278:37]
wire _idStall_126_T = ~idle_126; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_126_T_3 = _idStall_126_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_126 = _idStall_126_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_127; // @[ToAXI4.scala:272:28]
wire _idStall_127_T_2 = count_127; // @[ToAXI4.scala:272:28, :286:44]
reg write_127; // @[ToAXI4.scala:273:24]
wire idle_127 = ~count_127; // @[ToAXI4.scala:272:28, :274:26]
wire inc_127 = a_sel_127 & _inc_T_127; // @[Decoupled.scala:51:35]
wire _dec_T_254 = d_sel_127 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_127 = _dec_T_254 & _dec_T_255; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_508 = {1'h0, count_127} + {1'h0, inc_127}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_509 = _count_T_508[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_510 = {1'h0, _count_T_509} - {1'h0, dec_127}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_511 = _count_T_510[0]; // @[ToAXI4.scala:278:37]
wire _idStall_127_T = ~idle_127; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_127_T_3 = _idStall_127_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_127 = _idStall_127_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_128; // @[ToAXI4.scala:272:28]
wire _idStall_128_T_2 = count_128; // @[ToAXI4.scala:272:28, :286:44]
reg write_128; // @[ToAXI4.scala:273:24]
wire idle_128 = ~count_128; // @[ToAXI4.scala:272:28, :274:26]
wire inc_128 = a_sel_128 & _inc_T_128; // @[Decoupled.scala:51:35]
wire _dec_T_256 = d_sel_128 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_128 = _dec_T_256 & _dec_T_257; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_512 = {1'h0, count_128} + {1'h0, inc_128}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_513 = _count_T_512[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_514 = {1'h0, _count_T_513} - {1'h0, dec_128}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_515 = _count_T_514[0]; // @[ToAXI4.scala:278:37]
wire _idStall_128_T = ~idle_128; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_128_T_3 = _idStall_128_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_128 = _idStall_128_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_129; // @[ToAXI4.scala:272:28]
wire _idStall_129_T_2 = count_129; // @[ToAXI4.scala:272:28, :286:44]
reg write_129; // @[ToAXI4.scala:273:24]
wire idle_129 = ~count_129; // @[ToAXI4.scala:272:28, :274:26]
wire inc_129 = a_sel_129 & _inc_T_129; // @[Decoupled.scala:51:35]
wire _dec_T_258 = d_sel_129 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_129 = _dec_T_258 & _dec_T_259; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_516 = {1'h0, count_129} + {1'h0, inc_129}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_517 = _count_T_516[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_518 = {1'h0, _count_T_517} - {1'h0, dec_129}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_519 = _count_T_518[0]; // @[ToAXI4.scala:278:37]
wire _idStall_129_T = ~idle_129; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_129_T_3 = _idStall_129_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_129 = _idStall_129_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_130; // @[ToAXI4.scala:272:28]
wire _idStall_130_T_2 = count_130; // @[ToAXI4.scala:272:28, :286:44]
reg write_130; // @[ToAXI4.scala:273:24]
wire idle_130 = ~count_130; // @[ToAXI4.scala:272:28, :274:26]
wire inc_130 = a_sel_130 & _inc_T_130; // @[Decoupled.scala:51:35]
wire _dec_T_260 = d_sel_130 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_130 = _dec_T_260 & _dec_T_261; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_520 = {1'h0, count_130} + {1'h0, inc_130}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_521 = _count_T_520[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_522 = {1'h0, _count_T_521} - {1'h0, dec_130}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_523 = _count_T_522[0]; // @[ToAXI4.scala:278:37]
wire _idStall_130_T = ~idle_130; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_130_T_3 = _idStall_130_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_130 = _idStall_130_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_131; // @[ToAXI4.scala:272:28]
wire _idStall_131_T_2 = count_131; // @[ToAXI4.scala:272:28, :286:44]
reg write_131; // @[ToAXI4.scala:273:24]
wire idle_131 = ~count_131; // @[ToAXI4.scala:272:28, :274:26]
wire inc_131 = a_sel_131 & _inc_T_131; // @[Decoupled.scala:51:35]
wire _dec_T_262 = d_sel_131 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_131 = _dec_T_262 & _dec_T_263; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_524 = {1'h0, count_131} + {1'h0, inc_131}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_525 = _count_T_524[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_526 = {1'h0, _count_T_525} - {1'h0, dec_131}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_527 = _count_T_526[0]; // @[ToAXI4.scala:278:37]
wire _idStall_131_T = ~idle_131; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_131_T_3 = _idStall_131_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_131 = _idStall_131_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_132; // @[ToAXI4.scala:272:28]
wire _idStall_132_T_2 = count_132; // @[ToAXI4.scala:272:28, :286:44]
reg write_132; // @[ToAXI4.scala:273:24]
wire idle_132 = ~count_132; // @[ToAXI4.scala:272:28, :274:26]
wire inc_132 = a_sel_132 & _inc_T_132; // @[Decoupled.scala:51:35]
wire _dec_T_264 = d_sel_132 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_132 = _dec_T_264 & _dec_T_265; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_528 = {1'h0, count_132} + {1'h0, inc_132}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_529 = _count_T_528[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_530 = {1'h0, _count_T_529} - {1'h0, dec_132}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_531 = _count_T_530[0]; // @[ToAXI4.scala:278:37]
wire _idStall_132_T = ~idle_132; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_132_T_3 = _idStall_132_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_132 = _idStall_132_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_133; // @[ToAXI4.scala:272:28]
wire _idStall_133_T_2 = count_133; // @[ToAXI4.scala:272:28, :286:44]
reg write_133; // @[ToAXI4.scala:273:24]
wire idle_133 = ~count_133; // @[ToAXI4.scala:272:28, :274:26]
wire inc_133 = a_sel_133 & _inc_T_133; // @[Decoupled.scala:51:35]
wire _dec_T_266 = d_sel_133 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_133 = _dec_T_266 & _dec_T_267; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_532 = {1'h0, count_133} + {1'h0, inc_133}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_533 = _count_T_532[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_534 = {1'h0, _count_T_533} - {1'h0, dec_133}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_535 = _count_T_534[0]; // @[ToAXI4.scala:278:37]
wire _idStall_133_T = ~idle_133; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_133_T_3 = _idStall_133_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_133 = _idStall_133_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_134; // @[ToAXI4.scala:272:28]
wire _idStall_134_T_2 = count_134; // @[ToAXI4.scala:272:28, :286:44]
reg write_134; // @[ToAXI4.scala:273:24]
wire idle_134 = ~count_134; // @[ToAXI4.scala:272:28, :274:26]
wire inc_134 = a_sel_134 & _inc_T_134; // @[Decoupled.scala:51:35]
wire _dec_T_268 = d_sel_134 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_134 = _dec_T_268 & _dec_T_269; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_536 = {1'h0, count_134} + {1'h0, inc_134}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_537 = _count_T_536[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_538 = {1'h0, _count_T_537} - {1'h0, dec_134}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_539 = _count_T_538[0]; // @[ToAXI4.scala:278:37]
wire _idStall_134_T = ~idle_134; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_134_T_3 = _idStall_134_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_134 = _idStall_134_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_135; // @[ToAXI4.scala:272:28]
wire _idStall_135_T_2 = count_135; // @[ToAXI4.scala:272:28, :286:44]
reg write_135; // @[ToAXI4.scala:273:24]
wire idle_135 = ~count_135; // @[ToAXI4.scala:272:28, :274:26]
wire inc_135 = a_sel_135 & _inc_T_135; // @[Decoupled.scala:51:35]
wire _dec_T_270 = d_sel_135 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_135 = _dec_T_270 & _dec_T_271; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_540 = {1'h0, count_135} + {1'h0, inc_135}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_541 = _count_T_540[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_542 = {1'h0, _count_T_541} - {1'h0, dec_135}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_543 = _count_T_542[0]; // @[ToAXI4.scala:278:37]
wire _idStall_135_T = ~idle_135; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_135_T_3 = _idStall_135_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_135 = _idStall_135_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_136; // @[ToAXI4.scala:272:28]
wire _idStall_136_T_2 = count_136; // @[ToAXI4.scala:272:28, :286:44]
reg write_136; // @[ToAXI4.scala:273:24]
wire idle_136 = ~count_136; // @[ToAXI4.scala:272:28, :274:26]
wire inc_136 = a_sel_136 & _inc_T_136; // @[Decoupled.scala:51:35]
wire _dec_T_272 = d_sel_136 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_136 = _dec_T_272 & _dec_T_273; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_544 = {1'h0, count_136} + {1'h0, inc_136}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_545 = _count_T_544[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_546 = {1'h0, _count_T_545} - {1'h0, dec_136}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_547 = _count_T_546[0]; // @[ToAXI4.scala:278:37]
wire _idStall_136_T = ~idle_136; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_136_T_3 = _idStall_136_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_136 = _idStall_136_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_137; // @[ToAXI4.scala:272:28]
wire _idStall_137_T_2 = count_137; // @[ToAXI4.scala:272:28, :286:44]
reg write_137; // @[ToAXI4.scala:273:24]
wire idle_137 = ~count_137; // @[ToAXI4.scala:272:28, :274:26]
wire inc_137 = a_sel_137 & _inc_T_137; // @[Decoupled.scala:51:35]
wire _dec_T_274 = d_sel_137 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_137 = _dec_T_274 & _dec_T_275; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_548 = {1'h0, count_137} + {1'h0, inc_137}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_549 = _count_T_548[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_550 = {1'h0, _count_T_549} - {1'h0, dec_137}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_551 = _count_T_550[0]; // @[ToAXI4.scala:278:37]
wire _idStall_137_T = ~idle_137; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_137_T_3 = _idStall_137_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_137 = _idStall_137_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_138; // @[ToAXI4.scala:272:28]
wire _idStall_138_T_2 = count_138; // @[ToAXI4.scala:272:28, :286:44]
reg write_138; // @[ToAXI4.scala:273:24]
wire idle_138 = ~count_138; // @[ToAXI4.scala:272:28, :274:26]
wire inc_138 = a_sel_138 & _inc_T_138; // @[Decoupled.scala:51:35]
wire _dec_T_276 = d_sel_138 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_138 = _dec_T_276 & _dec_T_277; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_552 = {1'h0, count_138} + {1'h0, inc_138}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_553 = _count_T_552[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_554 = {1'h0, _count_T_553} - {1'h0, dec_138}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_555 = _count_T_554[0]; // @[ToAXI4.scala:278:37]
wire _idStall_138_T = ~idle_138; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_138_T_3 = _idStall_138_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_138 = _idStall_138_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_139; // @[ToAXI4.scala:272:28]
wire _idStall_139_T_2 = count_139; // @[ToAXI4.scala:272:28, :286:44]
reg write_139; // @[ToAXI4.scala:273:24]
wire idle_139 = ~count_139; // @[ToAXI4.scala:272:28, :274:26]
wire inc_139 = a_sel_139 & _inc_T_139; // @[Decoupled.scala:51:35]
wire _dec_T_278 = d_sel_139 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_139 = _dec_T_278 & _dec_T_279; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_556 = {1'h0, count_139} + {1'h0, inc_139}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_557 = _count_T_556[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_558 = {1'h0, _count_T_557} - {1'h0, dec_139}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_559 = _count_T_558[0]; // @[ToAXI4.scala:278:37]
wire _idStall_139_T = ~idle_139; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_139_T_3 = _idStall_139_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_139 = _idStall_139_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_140; // @[ToAXI4.scala:272:28]
wire _idStall_140_T_2 = count_140; // @[ToAXI4.scala:272:28, :286:44]
reg write_140; // @[ToAXI4.scala:273:24]
wire idle_140 = ~count_140; // @[ToAXI4.scala:272:28, :274:26]
wire inc_140 = a_sel_140 & _inc_T_140; // @[Decoupled.scala:51:35]
wire _dec_T_280 = d_sel_140 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_140 = _dec_T_280 & _dec_T_281; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_560 = {1'h0, count_140} + {1'h0, inc_140}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_561 = _count_T_560[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_562 = {1'h0, _count_T_561} - {1'h0, dec_140}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_563 = _count_T_562[0]; // @[ToAXI4.scala:278:37]
wire _idStall_140_T = ~idle_140; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_140_T_3 = _idStall_140_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_140 = _idStall_140_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_141; // @[ToAXI4.scala:272:28]
wire _idStall_141_T_2 = count_141; // @[ToAXI4.scala:272:28, :286:44]
reg write_141; // @[ToAXI4.scala:273:24]
wire idle_141 = ~count_141; // @[ToAXI4.scala:272:28, :274:26]
wire inc_141 = a_sel_141 & _inc_T_141; // @[Decoupled.scala:51:35]
wire _dec_T_282 = d_sel_141 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_141 = _dec_T_282 & _dec_T_283; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_564 = {1'h0, count_141} + {1'h0, inc_141}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_565 = _count_T_564[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_566 = {1'h0, _count_T_565} - {1'h0, dec_141}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_567 = _count_T_566[0]; // @[ToAXI4.scala:278:37]
wire _idStall_141_T = ~idle_141; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_141_T_3 = _idStall_141_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_141 = _idStall_141_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_142; // @[ToAXI4.scala:272:28]
wire _idStall_142_T_2 = count_142; // @[ToAXI4.scala:272:28, :286:44]
reg write_142; // @[ToAXI4.scala:273:24]
wire idle_142 = ~count_142; // @[ToAXI4.scala:272:28, :274:26]
wire inc_142 = a_sel_142 & _inc_T_142; // @[Decoupled.scala:51:35]
wire _dec_T_284 = d_sel_142 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_142 = _dec_T_284 & _dec_T_285; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_568 = {1'h0, count_142} + {1'h0, inc_142}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_569 = _count_T_568[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_570 = {1'h0, _count_T_569} - {1'h0, dec_142}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_571 = _count_T_570[0]; // @[ToAXI4.scala:278:37]
wire _idStall_142_T = ~idle_142; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_142_T_3 = _idStall_142_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_142 = _idStall_142_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_143; // @[ToAXI4.scala:272:28]
wire _idStall_143_T_2 = count_143; // @[ToAXI4.scala:272:28, :286:44]
reg write_143; // @[ToAXI4.scala:273:24]
wire idle_143 = ~count_143; // @[ToAXI4.scala:272:28, :274:26]
wire inc_143 = a_sel_143 & _inc_T_143; // @[Decoupled.scala:51:35]
wire _dec_T_286 = d_sel_143 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_143 = _dec_T_286 & _dec_T_287; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_572 = {1'h0, count_143} + {1'h0, inc_143}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_573 = _count_T_572[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_574 = {1'h0, _count_T_573} - {1'h0, dec_143}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_575 = _count_T_574[0]; // @[ToAXI4.scala:278:37]
wire _idStall_143_T = ~idle_143; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_143_T_3 = _idStall_143_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_143 = _idStall_143_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_144; // @[ToAXI4.scala:272:28]
wire _idStall_144_T_2 = count_144; // @[ToAXI4.scala:272:28, :286:44]
reg write_144; // @[ToAXI4.scala:273:24]
wire idle_144 = ~count_144; // @[ToAXI4.scala:272:28, :274:26]
wire inc_144 = a_sel_144 & _inc_T_144; // @[Decoupled.scala:51:35]
wire _dec_T_288 = d_sel_144 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_144 = _dec_T_288 & _dec_T_289; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_576 = {1'h0, count_144} + {1'h0, inc_144}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_577 = _count_T_576[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_578 = {1'h0, _count_T_577} - {1'h0, dec_144}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_579 = _count_T_578[0]; // @[ToAXI4.scala:278:37]
wire _idStall_144_T = ~idle_144; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_144_T_3 = _idStall_144_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_144 = _idStall_144_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_145; // @[ToAXI4.scala:272:28]
wire _idStall_145_T_2 = count_145; // @[ToAXI4.scala:272:28, :286:44]
reg write_145; // @[ToAXI4.scala:273:24]
wire idle_145 = ~count_145; // @[ToAXI4.scala:272:28, :274:26]
wire inc_145 = a_sel_145 & _inc_T_145; // @[Decoupled.scala:51:35]
wire _dec_T_290 = d_sel_145 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_145 = _dec_T_290 & _dec_T_291; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_580 = {1'h0, count_145} + {1'h0, inc_145}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_581 = _count_T_580[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_582 = {1'h0, _count_T_581} - {1'h0, dec_145}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_583 = _count_T_582[0]; // @[ToAXI4.scala:278:37]
wire _idStall_145_T = ~idle_145; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_145_T_3 = _idStall_145_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_145 = _idStall_145_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_146; // @[ToAXI4.scala:272:28]
wire _idStall_146_T_2 = count_146; // @[ToAXI4.scala:272:28, :286:44]
reg write_146; // @[ToAXI4.scala:273:24]
wire idle_146 = ~count_146; // @[ToAXI4.scala:272:28, :274:26]
wire inc_146 = a_sel_146 & _inc_T_146; // @[Decoupled.scala:51:35]
wire _dec_T_292 = d_sel_146 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_146 = _dec_T_292 & _dec_T_293; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_584 = {1'h0, count_146} + {1'h0, inc_146}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_585 = _count_T_584[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_586 = {1'h0, _count_T_585} - {1'h0, dec_146}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_587 = _count_T_586[0]; // @[ToAXI4.scala:278:37]
wire _idStall_146_T = ~idle_146; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_146_T_3 = _idStall_146_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_146 = _idStall_146_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_147; // @[ToAXI4.scala:272:28]
wire _idStall_147_T_2 = count_147; // @[ToAXI4.scala:272:28, :286:44]
reg write_147; // @[ToAXI4.scala:273:24]
wire idle_147 = ~count_147; // @[ToAXI4.scala:272:28, :274:26]
wire inc_147 = a_sel_147 & _inc_T_147; // @[Decoupled.scala:51:35]
wire _dec_T_294 = d_sel_147 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_147 = _dec_T_294 & _dec_T_295; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_588 = {1'h0, count_147} + {1'h0, inc_147}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_589 = _count_T_588[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_590 = {1'h0, _count_T_589} - {1'h0, dec_147}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_591 = _count_T_590[0]; // @[ToAXI4.scala:278:37]
wire _idStall_147_T = ~idle_147; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_147_T_3 = _idStall_147_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_147 = _idStall_147_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_148; // @[ToAXI4.scala:272:28]
wire _idStall_148_T_2 = count_148; // @[ToAXI4.scala:272:28, :286:44]
reg write_148; // @[ToAXI4.scala:273:24]
wire idle_148 = ~count_148; // @[ToAXI4.scala:272:28, :274:26]
wire inc_148 = a_sel_148 & _inc_T_148; // @[Decoupled.scala:51:35]
wire _dec_T_296 = d_sel_148 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_148 = _dec_T_296 & _dec_T_297; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_592 = {1'h0, count_148} + {1'h0, inc_148}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_593 = _count_T_592[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_594 = {1'h0, _count_T_593} - {1'h0, dec_148}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_595 = _count_T_594[0]; // @[ToAXI4.scala:278:37]
wire _idStall_148_T = ~idle_148; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_148_T_3 = _idStall_148_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_148 = _idStall_148_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_149; // @[ToAXI4.scala:272:28]
wire _idStall_149_T_2 = count_149; // @[ToAXI4.scala:272:28, :286:44]
reg write_149; // @[ToAXI4.scala:273:24]
wire idle_149 = ~count_149; // @[ToAXI4.scala:272:28, :274:26]
wire inc_149 = a_sel_149 & _inc_T_149; // @[Decoupled.scala:51:35]
wire _dec_T_298 = d_sel_149 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_149 = _dec_T_298 & _dec_T_299; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_596 = {1'h0, count_149} + {1'h0, inc_149}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_597 = _count_T_596[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_598 = {1'h0, _count_T_597} - {1'h0, dec_149}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_599 = _count_T_598[0]; // @[ToAXI4.scala:278:37]
wire _idStall_149_T = ~idle_149; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_149_T_3 = _idStall_149_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_149 = _idStall_149_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_150; // @[ToAXI4.scala:272:28]
wire _idStall_150_T_2 = count_150; // @[ToAXI4.scala:272:28, :286:44]
reg write_150; // @[ToAXI4.scala:273:24]
wire idle_150 = ~count_150; // @[ToAXI4.scala:272:28, :274:26]
wire inc_150 = a_sel_150 & _inc_T_150; // @[Decoupled.scala:51:35]
wire _dec_T_300 = d_sel_150 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_150 = _dec_T_300 & _dec_T_301; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_600 = {1'h0, count_150} + {1'h0, inc_150}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_601 = _count_T_600[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_602 = {1'h0, _count_T_601} - {1'h0, dec_150}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_603 = _count_T_602[0]; // @[ToAXI4.scala:278:37]
wire _idStall_150_T = ~idle_150; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_150_T_3 = _idStall_150_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_150 = _idStall_150_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_151; // @[ToAXI4.scala:272:28]
wire _idStall_151_T_2 = count_151; // @[ToAXI4.scala:272:28, :286:44]
reg write_151; // @[ToAXI4.scala:273:24]
wire idle_151 = ~count_151; // @[ToAXI4.scala:272:28, :274:26]
wire inc_151 = a_sel_151 & _inc_T_151; // @[Decoupled.scala:51:35]
wire _dec_T_302 = d_sel_151 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_151 = _dec_T_302 & _dec_T_303; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_604 = {1'h0, count_151} + {1'h0, inc_151}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_605 = _count_T_604[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_606 = {1'h0, _count_T_605} - {1'h0, dec_151}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_607 = _count_T_606[0]; // @[ToAXI4.scala:278:37]
wire _idStall_151_T = ~idle_151; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_151_T_3 = _idStall_151_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_151 = _idStall_151_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_152; // @[ToAXI4.scala:272:28]
wire _idStall_152_T_2 = count_152; // @[ToAXI4.scala:272:28, :286:44]
reg write_152; // @[ToAXI4.scala:273:24]
wire idle_152 = ~count_152; // @[ToAXI4.scala:272:28, :274:26]
wire inc_152 = a_sel_152 & _inc_T_152; // @[Decoupled.scala:51:35]
wire _dec_T_304 = d_sel_152 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_152 = _dec_T_304 & _dec_T_305; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_608 = {1'h0, count_152} + {1'h0, inc_152}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_609 = _count_T_608[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_610 = {1'h0, _count_T_609} - {1'h0, dec_152}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_611 = _count_T_610[0]; // @[ToAXI4.scala:278:37]
wire _idStall_152_T = ~idle_152; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_152_T_3 = _idStall_152_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_152 = _idStall_152_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_153; // @[ToAXI4.scala:272:28]
wire _idStall_153_T_2 = count_153; // @[ToAXI4.scala:272:28, :286:44]
reg write_153; // @[ToAXI4.scala:273:24]
wire idle_153 = ~count_153; // @[ToAXI4.scala:272:28, :274:26]
wire inc_153 = a_sel_153 & _inc_T_153; // @[Decoupled.scala:51:35]
wire _dec_T_306 = d_sel_153 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_153 = _dec_T_306 & _dec_T_307; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_612 = {1'h0, count_153} + {1'h0, inc_153}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_613 = _count_T_612[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_614 = {1'h0, _count_T_613} - {1'h0, dec_153}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_615 = _count_T_614[0]; // @[ToAXI4.scala:278:37]
wire _idStall_153_T = ~idle_153; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_153_T_3 = _idStall_153_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_153 = _idStall_153_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_154; // @[ToAXI4.scala:272:28]
wire _idStall_154_T_2 = count_154; // @[ToAXI4.scala:272:28, :286:44]
reg write_154; // @[ToAXI4.scala:273:24]
wire idle_154 = ~count_154; // @[ToAXI4.scala:272:28, :274:26]
wire inc_154 = a_sel_154 & _inc_T_154; // @[Decoupled.scala:51:35]
wire _dec_T_308 = d_sel_154 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_154 = _dec_T_308 & _dec_T_309; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_616 = {1'h0, count_154} + {1'h0, inc_154}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_617 = _count_T_616[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_618 = {1'h0, _count_T_617} - {1'h0, dec_154}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_619 = _count_T_618[0]; // @[ToAXI4.scala:278:37]
wire _idStall_154_T = ~idle_154; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_154_T_3 = _idStall_154_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_154 = _idStall_154_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_155; // @[ToAXI4.scala:272:28]
wire _idStall_155_T_2 = count_155; // @[ToAXI4.scala:272:28, :286:44]
reg write_155; // @[ToAXI4.scala:273:24]
wire idle_155 = ~count_155; // @[ToAXI4.scala:272:28, :274:26]
wire inc_155 = a_sel_155 & _inc_T_155; // @[Decoupled.scala:51:35]
wire _dec_T_310 = d_sel_155 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_155 = _dec_T_310 & _dec_T_311; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_620 = {1'h0, count_155} + {1'h0, inc_155}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_621 = _count_T_620[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_622 = {1'h0, _count_T_621} - {1'h0, dec_155}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_623 = _count_T_622[0]; // @[ToAXI4.scala:278:37]
wire _idStall_155_T = ~idle_155; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_155_T_3 = _idStall_155_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_155 = _idStall_155_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_156; // @[ToAXI4.scala:272:28]
wire _idStall_156_T_2 = count_156; // @[ToAXI4.scala:272:28, :286:44]
reg write_156; // @[ToAXI4.scala:273:24]
wire idle_156 = ~count_156; // @[ToAXI4.scala:272:28, :274:26]
wire inc_156 = a_sel_156 & _inc_T_156; // @[Decoupled.scala:51:35]
wire _dec_T_312 = d_sel_156 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_156 = _dec_T_312 & _dec_T_313; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_624 = {1'h0, count_156} + {1'h0, inc_156}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_625 = _count_T_624[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_626 = {1'h0, _count_T_625} - {1'h0, dec_156}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_627 = _count_T_626[0]; // @[ToAXI4.scala:278:37]
wire _idStall_156_T = ~idle_156; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_156_T_3 = _idStall_156_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_156 = _idStall_156_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_157; // @[ToAXI4.scala:272:28]
wire _idStall_157_T_2 = count_157; // @[ToAXI4.scala:272:28, :286:44]
reg write_157; // @[ToAXI4.scala:273:24]
wire idle_157 = ~count_157; // @[ToAXI4.scala:272:28, :274:26]
wire inc_157 = a_sel_157 & _inc_T_157; // @[Decoupled.scala:51:35]
wire _dec_T_314 = d_sel_157 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_157 = _dec_T_314 & _dec_T_315; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_628 = {1'h0, count_157} + {1'h0, inc_157}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_629 = _count_T_628[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_630 = {1'h0, _count_T_629} - {1'h0, dec_157}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_631 = _count_T_630[0]; // @[ToAXI4.scala:278:37]
wire _idStall_157_T = ~idle_157; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_157_T_3 = _idStall_157_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_157 = _idStall_157_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_158; // @[ToAXI4.scala:272:28]
wire _idStall_158_T_2 = count_158; // @[ToAXI4.scala:272:28, :286:44]
reg write_158; // @[ToAXI4.scala:273:24]
wire idle_158 = ~count_158; // @[ToAXI4.scala:272:28, :274:26]
wire inc_158 = a_sel_158 & _inc_T_158; // @[Decoupled.scala:51:35]
wire _dec_T_316 = d_sel_158 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_158 = _dec_T_316 & _dec_T_317; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_632 = {1'h0, count_158} + {1'h0, inc_158}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_633 = _count_T_632[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_634 = {1'h0, _count_T_633} - {1'h0, dec_158}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_635 = _count_T_634[0]; // @[ToAXI4.scala:278:37]
wire _idStall_158_T = ~idle_158; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_158_T_3 = _idStall_158_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_158 = _idStall_158_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_159; // @[ToAXI4.scala:272:28]
wire _idStall_159_T_2 = count_159; // @[ToAXI4.scala:272:28, :286:44]
reg write_159; // @[ToAXI4.scala:273:24]
wire idle_159 = ~count_159; // @[ToAXI4.scala:272:28, :274:26]
wire inc_159 = a_sel_159 & _inc_T_159; // @[Decoupled.scala:51:35]
wire _dec_T_318 = d_sel_159 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_159 = _dec_T_318 & _dec_T_319; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_636 = {1'h0, count_159} + {1'h0, inc_159}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_637 = _count_T_636[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_638 = {1'h0, _count_T_637} - {1'h0, dec_159}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_639 = _count_T_638[0]; // @[ToAXI4.scala:278:37]
wire _idStall_159_T = ~idle_159; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_159_T_3 = _idStall_159_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_159 = _idStall_159_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_160; // @[ToAXI4.scala:272:28]
wire _idStall_160_T_2 = count_160; // @[ToAXI4.scala:272:28, :286:44]
reg write_160; // @[ToAXI4.scala:273:24]
wire idle_160 = ~count_160; // @[ToAXI4.scala:272:28, :274:26]
wire inc_160 = a_sel_160 & _inc_T_160; // @[Decoupled.scala:51:35]
wire _dec_T_320 = d_sel_160 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_160 = _dec_T_320 & _dec_T_321; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_640 = {1'h0, count_160} + {1'h0, inc_160}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_641 = _count_T_640[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_642 = {1'h0, _count_T_641} - {1'h0, dec_160}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_643 = _count_T_642[0]; // @[ToAXI4.scala:278:37]
wire _idStall_160_T = ~idle_160; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_160_T_3 = _idStall_160_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_160 = _idStall_160_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_161; // @[ToAXI4.scala:272:28]
wire _idStall_161_T_2 = count_161; // @[ToAXI4.scala:272:28, :286:44]
reg write_161; // @[ToAXI4.scala:273:24]
wire idle_161 = ~count_161; // @[ToAXI4.scala:272:28, :274:26]
wire inc_161 = a_sel_161 & _inc_T_161; // @[Decoupled.scala:51:35]
wire _dec_T_322 = d_sel_161 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_161 = _dec_T_322 & _dec_T_323; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_644 = {1'h0, count_161} + {1'h0, inc_161}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_645 = _count_T_644[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_646 = {1'h0, _count_T_645} - {1'h0, dec_161}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_647 = _count_T_646[0]; // @[ToAXI4.scala:278:37]
wire _idStall_161_T = ~idle_161; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_161_T_3 = _idStall_161_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_161 = _idStall_161_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_162; // @[ToAXI4.scala:272:28]
wire _idStall_162_T_2 = count_162; // @[ToAXI4.scala:272:28, :286:44]
reg write_162; // @[ToAXI4.scala:273:24]
wire idle_162 = ~count_162; // @[ToAXI4.scala:272:28, :274:26]
wire inc_162 = a_sel_162 & _inc_T_162; // @[Decoupled.scala:51:35]
wire _dec_T_324 = d_sel_162 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_162 = _dec_T_324 & _dec_T_325; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_648 = {1'h0, count_162} + {1'h0, inc_162}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_649 = _count_T_648[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_650 = {1'h0, _count_T_649} - {1'h0, dec_162}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_651 = _count_T_650[0]; // @[ToAXI4.scala:278:37]
wire _idStall_162_T = ~idle_162; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_162_T_3 = _idStall_162_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_162 = _idStall_162_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_163; // @[ToAXI4.scala:272:28]
wire _idStall_163_T_2 = count_163; // @[ToAXI4.scala:272:28, :286:44]
reg write_163; // @[ToAXI4.scala:273:24]
wire idle_163 = ~count_163; // @[ToAXI4.scala:272:28, :274:26]
wire inc_163 = a_sel_163 & _inc_T_163; // @[Decoupled.scala:51:35]
wire _dec_T_326 = d_sel_163 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_163 = _dec_T_326 & _dec_T_327; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_652 = {1'h0, count_163} + {1'h0, inc_163}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_653 = _count_T_652[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_654 = {1'h0, _count_T_653} - {1'h0, dec_163}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_655 = _count_T_654[0]; // @[ToAXI4.scala:278:37]
wire _idStall_163_T = ~idle_163; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_163_T_3 = _idStall_163_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_163 = _idStall_163_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_164; // @[ToAXI4.scala:272:28]
wire _idStall_164_T_2 = count_164; // @[ToAXI4.scala:272:28, :286:44]
reg write_164; // @[ToAXI4.scala:273:24]
wire idle_164 = ~count_164; // @[ToAXI4.scala:272:28, :274:26]
wire inc_164 = a_sel_164 & _inc_T_164; // @[Decoupled.scala:51:35]
wire _dec_T_328 = d_sel_164 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_164 = _dec_T_328 & _dec_T_329; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_656 = {1'h0, count_164} + {1'h0, inc_164}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_657 = _count_T_656[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_658 = {1'h0, _count_T_657} - {1'h0, dec_164}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_659 = _count_T_658[0]; // @[ToAXI4.scala:278:37]
wire _idStall_164_T = ~idle_164; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_164_T_3 = _idStall_164_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_164 = _idStall_164_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_165; // @[ToAXI4.scala:272:28]
wire _idStall_165_T_2 = count_165; // @[ToAXI4.scala:272:28, :286:44]
reg write_165; // @[ToAXI4.scala:273:24]
wire idle_165 = ~count_165; // @[ToAXI4.scala:272:28, :274:26]
wire inc_165 = a_sel_165 & _inc_T_165; // @[Decoupled.scala:51:35]
wire _dec_T_330 = d_sel_165 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_165 = _dec_T_330 & _dec_T_331; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_660 = {1'h0, count_165} + {1'h0, inc_165}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_661 = _count_T_660[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_662 = {1'h0, _count_T_661} - {1'h0, dec_165}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_663 = _count_T_662[0]; // @[ToAXI4.scala:278:37]
wire _idStall_165_T = ~idle_165; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_165_T_3 = _idStall_165_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_165 = _idStall_165_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_166; // @[ToAXI4.scala:272:28]
wire _idStall_166_T_2 = count_166; // @[ToAXI4.scala:272:28, :286:44]
reg write_166; // @[ToAXI4.scala:273:24]
wire idle_166 = ~count_166; // @[ToAXI4.scala:272:28, :274:26]
wire inc_166 = a_sel_166 & _inc_T_166; // @[Decoupled.scala:51:35]
wire _dec_T_332 = d_sel_166 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_166 = _dec_T_332 & _dec_T_333; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_664 = {1'h0, count_166} + {1'h0, inc_166}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_665 = _count_T_664[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_666 = {1'h0, _count_T_665} - {1'h0, dec_166}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_667 = _count_T_666[0]; // @[ToAXI4.scala:278:37]
wire _idStall_166_T = ~idle_166; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_166_T_3 = _idStall_166_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_166 = _idStall_166_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_167; // @[ToAXI4.scala:272:28]
wire _idStall_167_T_2 = count_167; // @[ToAXI4.scala:272:28, :286:44]
reg write_167; // @[ToAXI4.scala:273:24]
wire idle_167 = ~count_167; // @[ToAXI4.scala:272:28, :274:26]
wire inc_167 = a_sel_167 & _inc_T_167; // @[Decoupled.scala:51:35]
wire _dec_T_334 = d_sel_167 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_167 = _dec_T_334 & _dec_T_335; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_668 = {1'h0, count_167} + {1'h0, inc_167}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_669 = _count_T_668[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_670 = {1'h0, _count_T_669} - {1'h0, dec_167}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_671 = _count_T_670[0]; // @[ToAXI4.scala:278:37]
wire _idStall_167_T = ~idle_167; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_167_T_3 = _idStall_167_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_167 = _idStall_167_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_168; // @[ToAXI4.scala:272:28]
wire _idStall_168_T_2 = count_168; // @[ToAXI4.scala:272:28, :286:44]
reg write_168; // @[ToAXI4.scala:273:24]
wire idle_168 = ~count_168; // @[ToAXI4.scala:272:28, :274:26]
wire inc_168 = a_sel_168 & _inc_T_168; // @[Decoupled.scala:51:35]
wire _dec_T_336 = d_sel_168 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_168 = _dec_T_336 & _dec_T_337; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_672 = {1'h0, count_168} + {1'h0, inc_168}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_673 = _count_T_672[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_674 = {1'h0, _count_T_673} - {1'h0, dec_168}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_675 = _count_T_674[0]; // @[ToAXI4.scala:278:37]
wire _idStall_168_T = ~idle_168; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_168_T_3 = _idStall_168_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_168 = _idStall_168_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_169; // @[ToAXI4.scala:272:28]
wire _idStall_169_T_2 = count_169; // @[ToAXI4.scala:272:28, :286:44]
reg write_169; // @[ToAXI4.scala:273:24]
wire idle_169 = ~count_169; // @[ToAXI4.scala:272:28, :274:26]
wire inc_169 = a_sel_169 & _inc_T_169; // @[Decoupled.scala:51:35]
wire _dec_T_338 = d_sel_169 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_169 = _dec_T_338 & _dec_T_339; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_676 = {1'h0, count_169} + {1'h0, inc_169}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_677 = _count_T_676[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_678 = {1'h0, _count_T_677} - {1'h0, dec_169}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_679 = _count_T_678[0]; // @[ToAXI4.scala:278:37]
wire _idStall_169_T = ~idle_169; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_169_T_3 = _idStall_169_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_169 = _idStall_169_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_170; // @[ToAXI4.scala:272:28]
wire _idStall_170_T_2 = count_170; // @[ToAXI4.scala:272:28, :286:44]
reg write_170; // @[ToAXI4.scala:273:24]
wire idle_170 = ~count_170; // @[ToAXI4.scala:272:28, :274:26]
wire inc_170 = a_sel_170 & _inc_T_170; // @[Decoupled.scala:51:35]
wire _dec_T_340 = d_sel_170 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_170 = _dec_T_340 & _dec_T_341; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_680 = {1'h0, count_170} + {1'h0, inc_170}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_681 = _count_T_680[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_682 = {1'h0, _count_T_681} - {1'h0, dec_170}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_683 = _count_T_682[0]; // @[ToAXI4.scala:278:37]
wire _idStall_170_T = ~idle_170; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_170_T_3 = _idStall_170_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_170 = _idStall_170_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_171; // @[ToAXI4.scala:272:28]
wire _idStall_171_T_2 = count_171; // @[ToAXI4.scala:272:28, :286:44]
reg write_171; // @[ToAXI4.scala:273:24]
wire idle_171 = ~count_171; // @[ToAXI4.scala:272:28, :274:26]
wire inc_171 = a_sel_171 & _inc_T_171; // @[Decoupled.scala:51:35]
wire _dec_T_342 = d_sel_171 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_171 = _dec_T_342 & _dec_T_343; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_684 = {1'h0, count_171} + {1'h0, inc_171}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_685 = _count_T_684[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_686 = {1'h0, _count_T_685} - {1'h0, dec_171}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_687 = _count_T_686[0]; // @[ToAXI4.scala:278:37]
wire _idStall_171_T = ~idle_171; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_171_T_3 = _idStall_171_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_171 = _idStall_171_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_172; // @[ToAXI4.scala:272:28]
wire _idStall_172_T_2 = count_172; // @[ToAXI4.scala:272:28, :286:44]
reg write_172; // @[ToAXI4.scala:273:24]
wire idle_172 = ~count_172; // @[ToAXI4.scala:272:28, :274:26]
wire inc_172 = a_sel_172 & _inc_T_172; // @[Decoupled.scala:51:35]
wire _dec_T_344 = d_sel_172 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_172 = _dec_T_344 & _dec_T_345; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_688 = {1'h0, count_172} + {1'h0, inc_172}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_689 = _count_T_688[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_690 = {1'h0, _count_T_689} - {1'h0, dec_172}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_691 = _count_T_690[0]; // @[ToAXI4.scala:278:37]
wire _idStall_172_T = ~idle_172; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_172_T_3 = _idStall_172_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_172 = _idStall_172_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_173; // @[ToAXI4.scala:272:28]
wire _idStall_173_T_2 = count_173; // @[ToAXI4.scala:272:28, :286:44]
reg write_173; // @[ToAXI4.scala:273:24]
wire idle_173 = ~count_173; // @[ToAXI4.scala:272:28, :274:26]
wire inc_173 = a_sel_173 & _inc_T_173; // @[Decoupled.scala:51:35]
wire _dec_T_346 = d_sel_173 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_173 = _dec_T_346 & _dec_T_347; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_692 = {1'h0, count_173} + {1'h0, inc_173}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_693 = _count_T_692[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_694 = {1'h0, _count_T_693} - {1'h0, dec_173}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_695 = _count_T_694[0]; // @[ToAXI4.scala:278:37]
wire _idStall_173_T = ~idle_173; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_173_T_3 = _idStall_173_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_173 = _idStall_173_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_174; // @[ToAXI4.scala:272:28]
wire _idStall_174_T_2 = count_174; // @[ToAXI4.scala:272:28, :286:44]
reg write_174; // @[ToAXI4.scala:273:24]
wire idle_174 = ~count_174; // @[ToAXI4.scala:272:28, :274:26]
wire inc_174 = a_sel_174 & _inc_T_174; // @[Decoupled.scala:51:35]
wire _dec_T_348 = d_sel_174 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_174 = _dec_T_348 & _dec_T_349; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_696 = {1'h0, count_174} + {1'h0, inc_174}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_697 = _count_T_696[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_698 = {1'h0, _count_T_697} - {1'h0, dec_174}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_699 = _count_T_698[0]; // @[ToAXI4.scala:278:37]
wire _idStall_174_T = ~idle_174; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_174_T_3 = _idStall_174_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_174 = _idStall_174_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_175; // @[ToAXI4.scala:272:28]
wire _idStall_175_T_2 = count_175; // @[ToAXI4.scala:272:28, :286:44]
reg write_175; // @[ToAXI4.scala:273:24]
wire idle_175 = ~count_175; // @[ToAXI4.scala:272:28, :274:26]
wire inc_175 = a_sel_175 & _inc_T_175; // @[Decoupled.scala:51:35]
wire _dec_T_350 = d_sel_175 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_175 = _dec_T_350 & _dec_T_351; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_700 = {1'h0, count_175} + {1'h0, inc_175}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_701 = _count_T_700[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_702 = {1'h0, _count_T_701} - {1'h0, dec_175}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_703 = _count_T_702[0]; // @[ToAXI4.scala:278:37]
wire _idStall_175_T = ~idle_175; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_175_T_3 = _idStall_175_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_175 = _idStall_175_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_176; // @[ToAXI4.scala:272:28]
wire _idStall_176_T_2 = count_176; // @[ToAXI4.scala:272:28, :286:44]
reg write_176; // @[ToAXI4.scala:273:24]
wire idle_176 = ~count_176; // @[ToAXI4.scala:272:28, :274:26]
wire inc_176 = a_sel_176 & _inc_T_176; // @[Decoupled.scala:51:35]
wire _dec_T_352 = d_sel_176 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_176 = _dec_T_352 & _dec_T_353; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_704 = {1'h0, count_176} + {1'h0, inc_176}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_705 = _count_T_704[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_706 = {1'h0, _count_T_705} - {1'h0, dec_176}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_707 = _count_T_706[0]; // @[ToAXI4.scala:278:37]
wire _idStall_176_T = ~idle_176; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_176_T_3 = _idStall_176_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_176 = _idStall_176_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_177; // @[ToAXI4.scala:272:28]
wire _idStall_177_T_2 = count_177; // @[ToAXI4.scala:272:28, :286:44]
reg write_177; // @[ToAXI4.scala:273:24]
wire idle_177 = ~count_177; // @[ToAXI4.scala:272:28, :274:26]
wire inc_177 = a_sel_177 & _inc_T_177; // @[Decoupled.scala:51:35]
wire _dec_T_354 = d_sel_177 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_177 = _dec_T_354 & _dec_T_355; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_708 = {1'h0, count_177} + {1'h0, inc_177}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_709 = _count_T_708[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_710 = {1'h0, _count_T_709} - {1'h0, dec_177}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_711 = _count_T_710[0]; // @[ToAXI4.scala:278:37]
wire _idStall_177_T = ~idle_177; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_177_T_3 = _idStall_177_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_177 = _idStall_177_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_178; // @[ToAXI4.scala:272:28]
wire _idStall_178_T_2 = count_178; // @[ToAXI4.scala:272:28, :286:44]
reg write_178; // @[ToAXI4.scala:273:24]
wire idle_178 = ~count_178; // @[ToAXI4.scala:272:28, :274:26]
wire inc_178 = a_sel_178 & _inc_T_178; // @[Decoupled.scala:51:35]
wire _dec_T_356 = d_sel_178 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_178 = _dec_T_356 & _dec_T_357; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_712 = {1'h0, count_178} + {1'h0, inc_178}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_713 = _count_T_712[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_714 = {1'h0, _count_T_713} - {1'h0, dec_178}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_715 = _count_T_714[0]; // @[ToAXI4.scala:278:37]
wire _idStall_178_T = ~idle_178; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_178_T_3 = _idStall_178_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_178 = _idStall_178_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_179; // @[ToAXI4.scala:272:28]
wire _idStall_179_T_2 = count_179; // @[ToAXI4.scala:272:28, :286:44]
reg write_179; // @[ToAXI4.scala:273:24]
wire idle_179 = ~count_179; // @[ToAXI4.scala:272:28, :274:26]
wire inc_179 = a_sel_179 & _inc_T_179; // @[Decoupled.scala:51:35]
wire _dec_T_358 = d_sel_179 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_179 = _dec_T_358 & _dec_T_359; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_716 = {1'h0, count_179} + {1'h0, inc_179}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_717 = _count_T_716[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_718 = {1'h0, _count_T_717} - {1'h0, dec_179}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_719 = _count_T_718[0]; // @[ToAXI4.scala:278:37]
wire _idStall_179_T = ~idle_179; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_179_T_3 = _idStall_179_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_179 = _idStall_179_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_180; // @[ToAXI4.scala:272:28]
wire _idStall_180_T_2 = count_180; // @[ToAXI4.scala:272:28, :286:44]
reg write_180; // @[ToAXI4.scala:273:24]
wire idle_180 = ~count_180; // @[ToAXI4.scala:272:28, :274:26]
wire inc_180 = a_sel_180 & _inc_T_180; // @[Decoupled.scala:51:35]
wire _dec_T_360 = d_sel_180 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_180 = _dec_T_360 & _dec_T_361; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_720 = {1'h0, count_180} + {1'h0, inc_180}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_721 = _count_T_720[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_722 = {1'h0, _count_T_721} - {1'h0, dec_180}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_723 = _count_T_722[0]; // @[ToAXI4.scala:278:37]
wire _idStall_180_T = ~idle_180; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_180_T_3 = _idStall_180_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_180 = _idStall_180_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_181; // @[ToAXI4.scala:272:28]
wire _idStall_181_T_2 = count_181; // @[ToAXI4.scala:272:28, :286:44]
reg write_181; // @[ToAXI4.scala:273:24]
wire idle_181 = ~count_181; // @[ToAXI4.scala:272:28, :274:26]
wire inc_181 = a_sel_181 & _inc_T_181; // @[Decoupled.scala:51:35]
wire _dec_T_362 = d_sel_181 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_181 = _dec_T_362 & _dec_T_363; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_724 = {1'h0, count_181} + {1'h0, inc_181}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_725 = _count_T_724[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_726 = {1'h0, _count_T_725} - {1'h0, dec_181}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_727 = _count_T_726[0]; // @[ToAXI4.scala:278:37]
wire _idStall_181_T = ~idle_181; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_181_T_3 = _idStall_181_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_181 = _idStall_181_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_182; // @[ToAXI4.scala:272:28]
wire _idStall_182_T_2 = count_182; // @[ToAXI4.scala:272:28, :286:44]
reg write_182; // @[ToAXI4.scala:273:24]
wire idle_182 = ~count_182; // @[ToAXI4.scala:272:28, :274:26]
wire inc_182 = a_sel_182 & _inc_T_182; // @[Decoupled.scala:51:35]
wire _dec_T_364 = d_sel_182 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_182 = _dec_T_364 & _dec_T_365; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_728 = {1'h0, count_182} + {1'h0, inc_182}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_729 = _count_T_728[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_730 = {1'h0, _count_T_729} - {1'h0, dec_182}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_731 = _count_T_730[0]; // @[ToAXI4.scala:278:37]
wire _idStall_182_T = ~idle_182; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_182_T_3 = _idStall_182_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_182 = _idStall_182_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_183; // @[ToAXI4.scala:272:28]
wire _idStall_183_T_2 = count_183; // @[ToAXI4.scala:272:28, :286:44]
reg write_183; // @[ToAXI4.scala:273:24]
wire idle_183 = ~count_183; // @[ToAXI4.scala:272:28, :274:26]
wire inc_183 = a_sel_183 & _inc_T_183; // @[Decoupled.scala:51:35]
wire _dec_T_366 = d_sel_183 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_183 = _dec_T_366 & _dec_T_367; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_732 = {1'h0, count_183} + {1'h0, inc_183}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_733 = _count_T_732[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_734 = {1'h0, _count_T_733} - {1'h0, dec_183}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_735 = _count_T_734[0]; // @[ToAXI4.scala:278:37]
wire _idStall_183_T = ~idle_183; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_183_T_3 = _idStall_183_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_183 = _idStall_183_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_184; // @[ToAXI4.scala:272:28]
wire _idStall_184_T_2 = count_184; // @[ToAXI4.scala:272:28, :286:44]
reg write_184; // @[ToAXI4.scala:273:24]
wire idle_184 = ~count_184; // @[ToAXI4.scala:272:28, :274:26]
wire inc_184 = a_sel_184 & _inc_T_184; // @[Decoupled.scala:51:35]
wire _dec_T_368 = d_sel_184 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_184 = _dec_T_368 & _dec_T_369; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_736 = {1'h0, count_184} + {1'h0, inc_184}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_737 = _count_T_736[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_738 = {1'h0, _count_T_737} - {1'h0, dec_184}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_739 = _count_T_738[0]; // @[ToAXI4.scala:278:37]
wire _idStall_184_T = ~idle_184; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_184_T_3 = _idStall_184_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_184 = _idStall_184_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_185; // @[ToAXI4.scala:272:28]
wire _idStall_185_T_2 = count_185; // @[ToAXI4.scala:272:28, :286:44]
reg write_185; // @[ToAXI4.scala:273:24]
wire idle_185 = ~count_185; // @[ToAXI4.scala:272:28, :274:26]
wire inc_185 = a_sel_185 & _inc_T_185; // @[Decoupled.scala:51:35]
wire _dec_T_370 = d_sel_185 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_185 = _dec_T_370 & _dec_T_371; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_740 = {1'h0, count_185} + {1'h0, inc_185}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_741 = _count_T_740[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_742 = {1'h0, _count_T_741} - {1'h0, dec_185}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_743 = _count_T_742[0]; // @[ToAXI4.scala:278:37]
wire _idStall_185_T = ~idle_185; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_185_T_3 = _idStall_185_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_185 = _idStall_185_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_186; // @[ToAXI4.scala:272:28]
wire _idStall_186_T_2 = count_186; // @[ToAXI4.scala:272:28, :286:44]
reg write_186; // @[ToAXI4.scala:273:24]
wire idle_186 = ~count_186; // @[ToAXI4.scala:272:28, :274:26]
wire inc_186 = a_sel_186 & _inc_T_186; // @[Decoupled.scala:51:35]
wire _dec_T_372 = d_sel_186 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_186 = _dec_T_372 & _dec_T_373; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_744 = {1'h0, count_186} + {1'h0, inc_186}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_745 = _count_T_744[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_746 = {1'h0, _count_T_745} - {1'h0, dec_186}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_747 = _count_T_746[0]; // @[ToAXI4.scala:278:37]
wire _idStall_186_T = ~idle_186; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_186_T_3 = _idStall_186_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_186 = _idStall_186_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_187; // @[ToAXI4.scala:272:28]
wire _idStall_187_T_2 = count_187; // @[ToAXI4.scala:272:28, :286:44]
reg write_187; // @[ToAXI4.scala:273:24]
wire idle_187 = ~count_187; // @[ToAXI4.scala:272:28, :274:26]
wire inc_187 = a_sel_187 & _inc_T_187; // @[Decoupled.scala:51:35]
wire _dec_T_374 = d_sel_187 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_187 = _dec_T_374 & _dec_T_375; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_748 = {1'h0, count_187} + {1'h0, inc_187}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_749 = _count_T_748[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_750 = {1'h0, _count_T_749} - {1'h0, dec_187}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_751 = _count_T_750[0]; // @[ToAXI4.scala:278:37]
wire _idStall_187_T = ~idle_187; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_187_T_3 = _idStall_187_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_187 = _idStall_187_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_188; // @[ToAXI4.scala:272:28]
wire _idStall_188_T_2 = count_188; // @[ToAXI4.scala:272:28, :286:44]
reg write_188; // @[ToAXI4.scala:273:24]
wire idle_188 = ~count_188; // @[ToAXI4.scala:272:28, :274:26]
wire inc_188 = a_sel_188 & _inc_T_188; // @[Decoupled.scala:51:35]
wire _dec_T_376 = d_sel_188 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_188 = _dec_T_376 & _dec_T_377; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_752 = {1'h0, count_188} + {1'h0, inc_188}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_753 = _count_T_752[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_754 = {1'h0, _count_T_753} - {1'h0, dec_188}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_755 = _count_T_754[0]; // @[ToAXI4.scala:278:37]
wire _idStall_188_T = ~idle_188; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_188_T_3 = _idStall_188_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_188 = _idStall_188_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_189; // @[ToAXI4.scala:272:28]
wire _idStall_189_T_2 = count_189; // @[ToAXI4.scala:272:28, :286:44]
reg write_189; // @[ToAXI4.scala:273:24]
wire idle_189 = ~count_189; // @[ToAXI4.scala:272:28, :274:26]
wire inc_189 = a_sel_189 & _inc_T_189; // @[Decoupled.scala:51:35]
wire _dec_T_378 = d_sel_189 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_189 = _dec_T_378 & _dec_T_379; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_756 = {1'h0, count_189} + {1'h0, inc_189}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_757 = _count_T_756[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_758 = {1'h0, _count_T_757} - {1'h0, dec_189}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_759 = _count_T_758[0]; // @[ToAXI4.scala:278:37]
wire _idStall_189_T = ~idle_189; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_189_T_3 = _idStall_189_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_189 = _idStall_189_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_190; // @[ToAXI4.scala:272:28]
wire _idStall_190_T_2 = count_190; // @[ToAXI4.scala:272:28, :286:44]
reg write_190; // @[ToAXI4.scala:273:24]
wire idle_190 = ~count_190; // @[ToAXI4.scala:272:28, :274:26]
wire inc_190 = a_sel_190 & _inc_T_190; // @[Decoupled.scala:51:35]
wire _dec_T_380 = d_sel_190 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_190 = _dec_T_380 & _dec_T_381; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_760 = {1'h0, count_190} + {1'h0, inc_190}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_761 = _count_T_760[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_762 = {1'h0, _count_T_761} - {1'h0, dec_190}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_763 = _count_T_762[0]; // @[ToAXI4.scala:278:37]
wire _idStall_190_T = ~idle_190; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_190_T_3 = _idStall_190_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_190 = _idStall_190_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_191; // @[ToAXI4.scala:272:28]
wire _idStall_191_T_2 = count_191; // @[ToAXI4.scala:272:28, :286:44]
reg write_191; // @[ToAXI4.scala:273:24]
wire idle_191 = ~count_191; // @[ToAXI4.scala:272:28, :274:26]
wire inc_191 = a_sel_191 & _inc_T_191; // @[Decoupled.scala:51:35]
wire _dec_T_382 = d_sel_191 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_191 = _dec_T_382 & _dec_T_383; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_764 = {1'h0, count_191} + {1'h0, inc_191}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_765 = _count_T_764[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_766 = {1'h0, _count_T_765} - {1'h0, dec_191}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_767 = _count_T_766[0]; // @[ToAXI4.scala:278:37]
wire _idStall_191_T = ~idle_191; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_191_T_3 = _idStall_191_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_191 = _idStall_191_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_192; // @[ToAXI4.scala:272:28]
wire _idStall_192_T_2 = count_192; // @[ToAXI4.scala:272:28, :286:44]
reg write_192; // @[ToAXI4.scala:273:24]
wire idle_192 = ~count_192; // @[ToAXI4.scala:272:28, :274:26]
wire inc_192 = a_sel_192 & _inc_T_192; // @[Decoupled.scala:51:35]
wire _dec_T_384 = d_sel_192 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_192 = _dec_T_384 & _dec_T_385; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_768 = {1'h0, count_192} + {1'h0, inc_192}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_769 = _count_T_768[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_770 = {1'h0, _count_T_769} - {1'h0, dec_192}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_771 = _count_T_770[0]; // @[ToAXI4.scala:278:37]
wire _idStall_192_T = ~idle_192; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_192_T_3 = _idStall_192_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_192 = _idStall_192_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_193; // @[ToAXI4.scala:272:28]
wire _idStall_193_T_2 = count_193; // @[ToAXI4.scala:272:28, :286:44]
reg write_193; // @[ToAXI4.scala:273:24]
wire idle_193 = ~count_193; // @[ToAXI4.scala:272:28, :274:26]
wire inc_193 = a_sel_193 & _inc_T_193; // @[Decoupled.scala:51:35]
wire _dec_T_386 = d_sel_193 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_193 = _dec_T_386 & _dec_T_387; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_772 = {1'h0, count_193} + {1'h0, inc_193}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_773 = _count_T_772[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_774 = {1'h0, _count_T_773} - {1'h0, dec_193}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_775 = _count_T_774[0]; // @[ToAXI4.scala:278:37]
wire _idStall_193_T = ~idle_193; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_193_T_3 = _idStall_193_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_193 = _idStall_193_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_194; // @[ToAXI4.scala:272:28]
wire _idStall_194_T_2 = count_194; // @[ToAXI4.scala:272:28, :286:44]
reg write_194; // @[ToAXI4.scala:273:24]
wire idle_194 = ~count_194; // @[ToAXI4.scala:272:28, :274:26]
wire inc_194 = a_sel_194 & _inc_T_194; // @[Decoupled.scala:51:35]
wire _dec_T_388 = d_sel_194 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_194 = _dec_T_388 & _dec_T_389; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_776 = {1'h0, count_194} + {1'h0, inc_194}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_777 = _count_T_776[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_778 = {1'h0, _count_T_777} - {1'h0, dec_194}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_779 = _count_T_778[0]; // @[ToAXI4.scala:278:37]
wire _idStall_194_T = ~idle_194; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_194_T_3 = _idStall_194_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_194 = _idStall_194_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_195; // @[ToAXI4.scala:272:28]
wire _idStall_195_T_2 = count_195; // @[ToAXI4.scala:272:28, :286:44]
reg write_195; // @[ToAXI4.scala:273:24]
wire idle_195 = ~count_195; // @[ToAXI4.scala:272:28, :274:26]
wire inc_195 = a_sel_195 & _inc_T_195; // @[Decoupled.scala:51:35]
wire _dec_T_390 = d_sel_195 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_195 = _dec_T_390 & _dec_T_391; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_780 = {1'h0, count_195} + {1'h0, inc_195}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_781 = _count_T_780[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_782 = {1'h0, _count_T_781} - {1'h0, dec_195}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_783 = _count_T_782[0]; // @[ToAXI4.scala:278:37]
wire _idStall_195_T = ~idle_195; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_195_T_3 = _idStall_195_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_195 = _idStall_195_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_196; // @[ToAXI4.scala:272:28]
wire _idStall_196_T_2 = count_196; // @[ToAXI4.scala:272:28, :286:44]
reg write_196; // @[ToAXI4.scala:273:24]
wire idle_196 = ~count_196; // @[ToAXI4.scala:272:28, :274:26]
wire inc_196 = a_sel_196 & _inc_T_196; // @[Decoupled.scala:51:35]
wire _dec_T_392 = d_sel_196 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_196 = _dec_T_392 & _dec_T_393; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_784 = {1'h0, count_196} + {1'h0, inc_196}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_785 = _count_T_784[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_786 = {1'h0, _count_T_785} - {1'h0, dec_196}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_787 = _count_T_786[0]; // @[ToAXI4.scala:278:37]
wire _idStall_196_T = ~idle_196; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_196_T_3 = _idStall_196_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_196 = _idStall_196_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_197; // @[ToAXI4.scala:272:28]
wire _idStall_197_T_2 = count_197; // @[ToAXI4.scala:272:28, :286:44]
reg write_197; // @[ToAXI4.scala:273:24]
wire idle_197 = ~count_197; // @[ToAXI4.scala:272:28, :274:26]
wire inc_197 = a_sel_197 & _inc_T_197; // @[Decoupled.scala:51:35]
wire _dec_T_394 = d_sel_197 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_197 = _dec_T_394 & _dec_T_395; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_788 = {1'h0, count_197} + {1'h0, inc_197}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_789 = _count_T_788[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_790 = {1'h0, _count_T_789} - {1'h0, dec_197}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_791 = _count_T_790[0]; // @[ToAXI4.scala:278:37]
wire _idStall_197_T = ~idle_197; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_197_T_3 = _idStall_197_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_197 = _idStall_197_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_198; // @[ToAXI4.scala:272:28]
wire _idStall_198_T_2 = count_198; // @[ToAXI4.scala:272:28, :286:44]
reg write_198; // @[ToAXI4.scala:273:24]
wire idle_198 = ~count_198; // @[ToAXI4.scala:272:28, :274:26]
wire inc_198 = a_sel_198 & _inc_T_198; // @[Decoupled.scala:51:35]
wire _dec_T_396 = d_sel_198 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_198 = _dec_T_396 & _dec_T_397; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_792 = {1'h0, count_198} + {1'h0, inc_198}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_793 = _count_T_792[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_794 = {1'h0, _count_T_793} - {1'h0, dec_198}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_795 = _count_T_794[0]; // @[ToAXI4.scala:278:37]
wire _idStall_198_T = ~idle_198; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_198_T_3 = _idStall_198_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_198 = _idStall_198_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_199; // @[ToAXI4.scala:272:28]
wire _idStall_199_T_2 = count_199; // @[ToAXI4.scala:272:28, :286:44]
reg write_199; // @[ToAXI4.scala:273:24]
wire idle_199 = ~count_199; // @[ToAXI4.scala:272:28, :274:26]
wire inc_199 = a_sel_199 & _inc_T_199; // @[Decoupled.scala:51:35]
wire _dec_T_398 = d_sel_199 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_199 = _dec_T_398 & _dec_T_399; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_796 = {1'h0, count_199} + {1'h0, inc_199}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_797 = _count_T_796[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_798 = {1'h0, _count_T_797} - {1'h0, dec_199}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_799 = _count_T_798[0]; // @[ToAXI4.scala:278:37]
wire _idStall_199_T = ~idle_199; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_199_T_3 = _idStall_199_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_199 = _idStall_199_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_200; // @[ToAXI4.scala:272:28]
wire _idStall_200_T_2 = count_200; // @[ToAXI4.scala:272:28, :286:44]
reg write_200; // @[ToAXI4.scala:273:24]
wire idle_200 = ~count_200; // @[ToAXI4.scala:272:28, :274:26]
wire inc_200 = a_sel_200 & _inc_T_200; // @[Decoupled.scala:51:35]
wire _dec_T_400 = d_sel_200 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_200 = _dec_T_400 & _dec_T_401; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_800 = {1'h0, count_200} + {1'h0, inc_200}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_801 = _count_T_800[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_802 = {1'h0, _count_T_801} - {1'h0, dec_200}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_803 = _count_T_802[0]; // @[ToAXI4.scala:278:37]
wire _idStall_200_T = ~idle_200; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_200_T_3 = _idStall_200_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_200 = _idStall_200_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_201; // @[ToAXI4.scala:272:28]
wire _idStall_201_T_2 = count_201; // @[ToAXI4.scala:272:28, :286:44]
reg write_201; // @[ToAXI4.scala:273:24]
wire idle_201 = ~count_201; // @[ToAXI4.scala:272:28, :274:26]
wire inc_201 = a_sel_201 & _inc_T_201; // @[Decoupled.scala:51:35]
wire _dec_T_402 = d_sel_201 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_201 = _dec_T_402 & _dec_T_403; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_804 = {1'h0, count_201} + {1'h0, inc_201}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_805 = _count_T_804[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_806 = {1'h0, _count_T_805} - {1'h0, dec_201}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_807 = _count_T_806[0]; // @[ToAXI4.scala:278:37]
wire _idStall_201_T = ~idle_201; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_201_T_3 = _idStall_201_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_201 = _idStall_201_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_202; // @[ToAXI4.scala:272:28]
wire _idStall_202_T_2 = count_202; // @[ToAXI4.scala:272:28, :286:44]
reg write_202; // @[ToAXI4.scala:273:24]
wire idle_202 = ~count_202; // @[ToAXI4.scala:272:28, :274:26]
wire inc_202 = a_sel_202 & _inc_T_202; // @[Decoupled.scala:51:35]
wire _dec_T_404 = d_sel_202 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_202 = _dec_T_404 & _dec_T_405; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_808 = {1'h0, count_202} + {1'h0, inc_202}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_809 = _count_T_808[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_810 = {1'h0, _count_T_809} - {1'h0, dec_202}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_811 = _count_T_810[0]; // @[ToAXI4.scala:278:37]
wire _idStall_202_T = ~idle_202; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_202_T_3 = _idStall_202_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_202 = _idStall_202_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_203; // @[ToAXI4.scala:272:28]
wire _idStall_203_T_2 = count_203; // @[ToAXI4.scala:272:28, :286:44]
reg write_203; // @[ToAXI4.scala:273:24]
wire idle_203 = ~count_203; // @[ToAXI4.scala:272:28, :274:26]
wire inc_203 = a_sel_203 & _inc_T_203; // @[Decoupled.scala:51:35]
wire _dec_T_406 = d_sel_203 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_203 = _dec_T_406 & _dec_T_407; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_812 = {1'h0, count_203} + {1'h0, inc_203}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_813 = _count_T_812[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_814 = {1'h0, _count_T_813} - {1'h0, dec_203}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_815 = _count_T_814[0]; // @[ToAXI4.scala:278:37]
wire _idStall_203_T = ~idle_203; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_203_T_3 = _idStall_203_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_203 = _idStall_203_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_204; // @[ToAXI4.scala:272:28]
wire _idStall_204_T_2 = count_204; // @[ToAXI4.scala:272:28, :286:44]
reg write_204; // @[ToAXI4.scala:273:24]
wire idle_204 = ~count_204; // @[ToAXI4.scala:272:28, :274:26]
wire inc_204 = a_sel_204 & _inc_T_204; // @[Decoupled.scala:51:35]
wire _dec_T_408 = d_sel_204 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_204 = _dec_T_408 & _dec_T_409; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_816 = {1'h0, count_204} + {1'h0, inc_204}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_817 = _count_T_816[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_818 = {1'h0, _count_T_817} - {1'h0, dec_204}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_819 = _count_T_818[0]; // @[ToAXI4.scala:278:37]
wire _idStall_204_T = ~idle_204; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_204_T_3 = _idStall_204_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_204 = _idStall_204_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_205; // @[ToAXI4.scala:272:28]
wire _idStall_205_T_2 = count_205; // @[ToAXI4.scala:272:28, :286:44]
reg write_205; // @[ToAXI4.scala:273:24]
wire idle_205 = ~count_205; // @[ToAXI4.scala:272:28, :274:26]
wire inc_205 = a_sel_205 & _inc_T_205; // @[Decoupled.scala:51:35]
wire _dec_T_410 = d_sel_205 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_205 = _dec_T_410 & _dec_T_411; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_820 = {1'h0, count_205} + {1'h0, inc_205}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_821 = _count_T_820[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_822 = {1'h0, _count_T_821} - {1'h0, dec_205}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_823 = _count_T_822[0]; // @[ToAXI4.scala:278:37]
wire _idStall_205_T = ~idle_205; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_205_T_3 = _idStall_205_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_205 = _idStall_205_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_206; // @[ToAXI4.scala:272:28]
wire _idStall_206_T_2 = count_206; // @[ToAXI4.scala:272:28, :286:44]
reg write_206; // @[ToAXI4.scala:273:24]
wire idle_206 = ~count_206; // @[ToAXI4.scala:272:28, :274:26]
wire inc_206 = a_sel_206 & _inc_T_206; // @[Decoupled.scala:51:35]
wire _dec_T_412 = d_sel_206 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_206 = _dec_T_412 & _dec_T_413; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_824 = {1'h0, count_206} + {1'h0, inc_206}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_825 = _count_T_824[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_826 = {1'h0, _count_T_825} - {1'h0, dec_206}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_827 = _count_T_826[0]; // @[ToAXI4.scala:278:37]
wire _idStall_206_T = ~idle_206; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_206_T_3 = _idStall_206_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_206 = _idStall_206_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_207; // @[ToAXI4.scala:272:28]
wire _idStall_207_T_2 = count_207; // @[ToAXI4.scala:272:28, :286:44]
reg write_207; // @[ToAXI4.scala:273:24]
wire idle_207 = ~count_207; // @[ToAXI4.scala:272:28, :274:26]
wire inc_207 = a_sel_207 & _inc_T_207; // @[Decoupled.scala:51:35]
wire _dec_T_414 = d_sel_207 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_207 = _dec_T_414 & _dec_T_415; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_828 = {1'h0, count_207} + {1'h0, inc_207}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_829 = _count_T_828[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_830 = {1'h0, _count_T_829} - {1'h0, dec_207}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_831 = _count_T_830[0]; // @[ToAXI4.scala:278:37]
wire _idStall_207_T = ~idle_207; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_207_T_3 = _idStall_207_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_207 = _idStall_207_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_208; // @[ToAXI4.scala:272:28]
wire _idStall_208_T_2 = count_208; // @[ToAXI4.scala:272:28, :286:44]
reg write_208; // @[ToAXI4.scala:273:24]
wire idle_208 = ~count_208; // @[ToAXI4.scala:272:28, :274:26]
wire inc_208 = a_sel_208 & _inc_T_208; // @[Decoupled.scala:51:35]
wire _dec_T_416 = d_sel_208 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_208 = _dec_T_416 & _dec_T_417; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_832 = {1'h0, count_208} + {1'h0, inc_208}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_833 = _count_T_832[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_834 = {1'h0, _count_T_833} - {1'h0, dec_208}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_835 = _count_T_834[0]; // @[ToAXI4.scala:278:37]
wire _idStall_208_T = ~idle_208; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_208_T_3 = _idStall_208_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_208 = _idStall_208_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_209; // @[ToAXI4.scala:272:28]
wire _idStall_209_T_2 = count_209; // @[ToAXI4.scala:272:28, :286:44]
reg write_209; // @[ToAXI4.scala:273:24]
wire idle_209 = ~count_209; // @[ToAXI4.scala:272:28, :274:26]
wire inc_209 = a_sel_209 & _inc_T_209; // @[Decoupled.scala:51:35]
wire _dec_T_418 = d_sel_209 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_209 = _dec_T_418 & _dec_T_419; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_836 = {1'h0, count_209} + {1'h0, inc_209}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_837 = _count_T_836[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_838 = {1'h0, _count_T_837} - {1'h0, dec_209}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_839 = _count_T_838[0]; // @[ToAXI4.scala:278:37]
wire _idStall_209_T = ~idle_209; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_209_T_3 = _idStall_209_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_209 = _idStall_209_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_210; // @[ToAXI4.scala:272:28]
wire _idStall_210_T_2 = count_210; // @[ToAXI4.scala:272:28, :286:44]
reg write_210; // @[ToAXI4.scala:273:24]
wire idle_210 = ~count_210; // @[ToAXI4.scala:272:28, :274:26]
wire inc_210 = a_sel_210 & _inc_T_210; // @[Decoupled.scala:51:35]
wire _dec_T_420 = d_sel_210 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_210 = _dec_T_420 & _dec_T_421; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_840 = {1'h0, count_210} + {1'h0, inc_210}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_841 = _count_T_840[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_842 = {1'h0, _count_T_841} - {1'h0, dec_210}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_843 = _count_T_842[0]; // @[ToAXI4.scala:278:37]
wire _idStall_210_T = ~idle_210; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_210_T_3 = _idStall_210_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_210 = _idStall_210_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_211; // @[ToAXI4.scala:272:28]
wire _idStall_211_T_2 = count_211; // @[ToAXI4.scala:272:28, :286:44]
reg write_211; // @[ToAXI4.scala:273:24]
wire idle_211 = ~count_211; // @[ToAXI4.scala:272:28, :274:26]
wire inc_211 = a_sel_211 & _inc_T_211; // @[Decoupled.scala:51:35]
wire _dec_T_422 = d_sel_211 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_211 = _dec_T_422 & _dec_T_423; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_844 = {1'h0, count_211} + {1'h0, inc_211}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_845 = _count_T_844[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_846 = {1'h0, _count_T_845} - {1'h0, dec_211}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_847 = _count_T_846[0]; // @[ToAXI4.scala:278:37]
wire _idStall_211_T = ~idle_211; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_211_T_3 = _idStall_211_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_211 = _idStall_211_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_212; // @[ToAXI4.scala:272:28]
wire _idStall_212_T_2 = count_212; // @[ToAXI4.scala:272:28, :286:44]
reg write_212; // @[ToAXI4.scala:273:24]
wire idle_212 = ~count_212; // @[ToAXI4.scala:272:28, :274:26]
wire inc_212 = a_sel_212 & _inc_T_212; // @[Decoupled.scala:51:35]
wire _dec_T_424 = d_sel_212 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_212 = _dec_T_424 & _dec_T_425; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_848 = {1'h0, count_212} + {1'h0, inc_212}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_849 = _count_T_848[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_850 = {1'h0, _count_T_849} - {1'h0, dec_212}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_851 = _count_T_850[0]; // @[ToAXI4.scala:278:37]
wire _idStall_212_T = ~idle_212; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_212_T_3 = _idStall_212_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_212 = _idStall_212_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_213; // @[ToAXI4.scala:272:28]
wire _idStall_213_T_2 = count_213; // @[ToAXI4.scala:272:28, :286:44]
reg write_213; // @[ToAXI4.scala:273:24]
wire idle_213 = ~count_213; // @[ToAXI4.scala:272:28, :274:26]
wire inc_213 = a_sel_213 & _inc_T_213; // @[Decoupled.scala:51:35]
wire _dec_T_426 = d_sel_213 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_213 = _dec_T_426 & _dec_T_427; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_852 = {1'h0, count_213} + {1'h0, inc_213}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_853 = _count_T_852[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_854 = {1'h0, _count_T_853} - {1'h0, dec_213}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_855 = _count_T_854[0]; // @[ToAXI4.scala:278:37]
wire _idStall_213_T = ~idle_213; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_213_T_3 = _idStall_213_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_213 = _idStall_213_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_214; // @[ToAXI4.scala:272:28]
wire _idStall_214_T_2 = count_214; // @[ToAXI4.scala:272:28, :286:44]
reg write_214; // @[ToAXI4.scala:273:24]
wire idle_214 = ~count_214; // @[ToAXI4.scala:272:28, :274:26]
wire inc_214 = a_sel_214 & _inc_T_214; // @[Decoupled.scala:51:35]
wire _dec_T_428 = d_sel_214 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_214 = _dec_T_428 & _dec_T_429; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_856 = {1'h0, count_214} + {1'h0, inc_214}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_857 = _count_T_856[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_858 = {1'h0, _count_T_857} - {1'h0, dec_214}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_859 = _count_T_858[0]; // @[ToAXI4.scala:278:37]
wire _idStall_214_T = ~idle_214; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_214_T_3 = _idStall_214_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_214 = _idStall_214_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_215; // @[ToAXI4.scala:272:28]
wire _idStall_215_T_2 = count_215; // @[ToAXI4.scala:272:28, :286:44]
reg write_215; // @[ToAXI4.scala:273:24]
wire idle_215 = ~count_215; // @[ToAXI4.scala:272:28, :274:26]
wire inc_215 = a_sel_215 & _inc_T_215; // @[Decoupled.scala:51:35]
wire _dec_T_430 = d_sel_215 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_215 = _dec_T_430 & _dec_T_431; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_860 = {1'h0, count_215} + {1'h0, inc_215}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_861 = _count_T_860[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_862 = {1'h0, _count_T_861} - {1'h0, dec_215}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_863 = _count_T_862[0]; // @[ToAXI4.scala:278:37]
wire _idStall_215_T = ~idle_215; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_215_T_3 = _idStall_215_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_215 = _idStall_215_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_216; // @[ToAXI4.scala:272:28]
wire _idStall_216_T_2 = count_216; // @[ToAXI4.scala:272:28, :286:44]
reg write_216; // @[ToAXI4.scala:273:24]
wire idle_216 = ~count_216; // @[ToAXI4.scala:272:28, :274:26]
wire inc_216 = a_sel_216 & _inc_T_216; // @[Decoupled.scala:51:35]
wire _dec_T_432 = d_sel_216 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_216 = _dec_T_432 & _dec_T_433; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_864 = {1'h0, count_216} + {1'h0, inc_216}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_865 = _count_T_864[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_866 = {1'h0, _count_T_865} - {1'h0, dec_216}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_867 = _count_T_866[0]; // @[ToAXI4.scala:278:37]
wire _idStall_216_T = ~idle_216; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_216_T_3 = _idStall_216_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_216 = _idStall_216_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_217; // @[ToAXI4.scala:272:28]
wire _idStall_217_T_2 = count_217; // @[ToAXI4.scala:272:28, :286:44]
reg write_217; // @[ToAXI4.scala:273:24]
wire idle_217 = ~count_217; // @[ToAXI4.scala:272:28, :274:26]
wire inc_217 = a_sel_217 & _inc_T_217; // @[Decoupled.scala:51:35]
wire _dec_T_434 = d_sel_217 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_217 = _dec_T_434 & _dec_T_435; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_868 = {1'h0, count_217} + {1'h0, inc_217}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_869 = _count_T_868[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_870 = {1'h0, _count_T_869} - {1'h0, dec_217}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_871 = _count_T_870[0]; // @[ToAXI4.scala:278:37]
wire _idStall_217_T = ~idle_217; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_217_T_3 = _idStall_217_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_217 = _idStall_217_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_218; // @[ToAXI4.scala:272:28]
wire _idStall_218_T_2 = count_218; // @[ToAXI4.scala:272:28, :286:44]
reg write_218; // @[ToAXI4.scala:273:24]
wire idle_218 = ~count_218; // @[ToAXI4.scala:272:28, :274:26]
wire inc_218 = a_sel_218 & _inc_T_218; // @[Decoupled.scala:51:35]
wire _dec_T_436 = d_sel_218 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_218 = _dec_T_436 & _dec_T_437; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_872 = {1'h0, count_218} + {1'h0, inc_218}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_873 = _count_T_872[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_874 = {1'h0, _count_T_873} - {1'h0, dec_218}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_875 = _count_T_874[0]; // @[ToAXI4.scala:278:37]
wire _idStall_218_T = ~idle_218; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_218_T_3 = _idStall_218_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_218 = _idStall_218_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_219; // @[ToAXI4.scala:272:28]
wire _idStall_219_T_2 = count_219; // @[ToAXI4.scala:272:28, :286:44]
reg write_219; // @[ToAXI4.scala:273:24]
wire idle_219 = ~count_219; // @[ToAXI4.scala:272:28, :274:26]
wire inc_219 = a_sel_219 & _inc_T_219; // @[Decoupled.scala:51:35]
wire _dec_T_438 = d_sel_219 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_219 = _dec_T_438 & _dec_T_439; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_876 = {1'h0, count_219} + {1'h0, inc_219}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_877 = _count_T_876[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_878 = {1'h0, _count_T_877} - {1'h0, dec_219}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_879 = _count_T_878[0]; // @[ToAXI4.scala:278:37]
wire _idStall_219_T = ~idle_219; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_219_T_3 = _idStall_219_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_219 = _idStall_219_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_220; // @[ToAXI4.scala:272:28]
wire _idStall_220_T_2 = count_220; // @[ToAXI4.scala:272:28, :286:44]
reg write_220; // @[ToAXI4.scala:273:24]
wire idle_220 = ~count_220; // @[ToAXI4.scala:272:28, :274:26]
wire inc_220 = a_sel_220 & _inc_T_220; // @[Decoupled.scala:51:35]
wire _dec_T_440 = d_sel_220 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_220 = _dec_T_440 & _dec_T_441; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_880 = {1'h0, count_220} + {1'h0, inc_220}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_881 = _count_T_880[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_882 = {1'h0, _count_T_881} - {1'h0, dec_220}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_883 = _count_T_882[0]; // @[ToAXI4.scala:278:37]
wire _idStall_220_T = ~idle_220; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_220_T_3 = _idStall_220_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_220 = _idStall_220_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_221; // @[ToAXI4.scala:272:28]
wire _idStall_221_T_2 = count_221; // @[ToAXI4.scala:272:28, :286:44]
reg write_221; // @[ToAXI4.scala:273:24]
wire idle_221 = ~count_221; // @[ToAXI4.scala:272:28, :274:26]
wire inc_221 = a_sel_221 & _inc_T_221; // @[Decoupled.scala:51:35]
wire _dec_T_442 = d_sel_221 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_221 = _dec_T_442 & _dec_T_443; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_884 = {1'h0, count_221} + {1'h0, inc_221}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_885 = _count_T_884[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_886 = {1'h0, _count_T_885} - {1'h0, dec_221}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_887 = _count_T_886[0]; // @[ToAXI4.scala:278:37]
wire _idStall_221_T = ~idle_221; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_221_T_3 = _idStall_221_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_221 = _idStall_221_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_222; // @[ToAXI4.scala:272:28]
wire _idStall_222_T_2 = count_222; // @[ToAXI4.scala:272:28, :286:44]
reg write_222; // @[ToAXI4.scala:273:24]
wire idle_222 = ~count_222; // @[ToAXI4.scala:272:28, :274:26]
wire inc_222 = a_sel_222 & _inc_T_222; // @[Decoupled.scala:51:35]
wire _dec_T_444 = d_sel_222 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_222 = _dec_T_444 & _dec_T_445; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_888 = {1'h0, count_222} + {1'h0, inc_222}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_889 = _count_T_888[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_890 = {1'h0, _count_T_889} - {1'h0, dec_222}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_891 = _count_T_890[0]; // @[ToAXI4.scala:278:37]
wire _idStall_222_T = ~idle_222; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_222_T_3 = _idStall_222_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_222 = _idStall_222_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_223; // @[ToAXI4.scala:272:28]
wire _idStall_223_T_2 = count_223; // @[ToAXI4.scala:272:28, :286:44]
reg write_223; // @[ToAXI4.scala:273:24]
wire idle_223 = ~count_223; // @[ToAXI4.scala:272:28, :274:26]
wire inc_223 = a_sel_223 & _inc_T_223; // @[Decoupled.scala:51:35]
wire _dec_T_446 = d_sel_223 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_223 = _dec_T_446 & _dec_T_447; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_892 = {1'h0, count_223} + {1'h0, inc_223}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_893 = _count_T_892[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_894 = {1'h0, _count_T_893} - {1'h0, dec_223}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_895 = _count_T_894[0]; // @[ToAXI4.scala:278:37]
wire _idStall_223_T = ~idle_223; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_223_T_3 = _idStall_223_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_223 = _idStall_223_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_224; // @[ToAXI4.scala:272:28]
wire _idStall_224_T_2 = count_224; // @[ToAXI4.scala:272:28, :286:44]
reg write_224; // @[ToAXI4.scala:273:24]
wire idle_224 = ~count_224; // @[ToAXI4.scala:272:28, :274:26]
wire inc_224 = a_sel_224 & _inc_T_224; // @[Decoupled.scala:51:35]
wire _dec_T_448 = d_sel_224 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_224 = _dec_T_448 & _dec_T_449; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_896 = {1'h0, count_224} + {1'h0, inc_224}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_897 = _count_T_896[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_898 = {1'h0, _count_T_897} - {1'h0, dec_224}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_899 = _count_T_898[0]; // @[ToAXI4.scala:278:37]
wire _idStall_224_T = ~idle_224; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_224_T_3 = _idStall_224_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_224 = _idStall_224_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_225; // @[ToAXI4.scala:272:28]
wire _idStall_225_T_2 = count_225; // @[ToAXI4.scala:272:28, :286:44]
reg write_225; // @[ToAXI4.scala:273:24]
wire idle_225 = ~count_225; // @[ToAXI4.scala:272:28, :274:26]
wire inc_225 = a_sel_225 & _inc_T_225; // @[Decoupled.scala:51:35]
wire _dec_T_450 = d_sel_225 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_225 = _dec_T_450 & _dec_T_451; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_900 = {1'h0, count_225} + {1'h0, inc_225}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_901 = _count_T_900[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_902 = {1'h0, _count_T_901} - {1'h0, dec_225}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_903 = _count_T_902[0]; // @[ToAXI4.scala:278:37]
wire _idStall_225_T = ~idle_225; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_225_T_3 = _idStall_225_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_225 = _idStall_225_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_226; // @[ToAXI4.scala:272:28]
wire _idStall_226_T_2 = count_226; // @[ToAXI4.scala:272:28, :286:44]
reg write_226; // @[ToAXI4.scala:273:24]
wire idle_226 = ~count_226; // @[ToAXI4.scala:272:28, :274:26]
wire inc_226 = a_sel_226 & _inc_T_226; // @[Decoupled.scala:51:35]
wire _dec_T_452 = d_sel_226 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_226 = _dec_T_452 & _dec_T_453; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_904 = {1'h0, count_226} + {1'h0, inc_226}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_905 = _count_T_904[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_906 = {1'h0, _count_T_905} - {1'h0, dec_226}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_907 = _count_T_906[0]; // @[ToAXI4.scala:278:37]
wire _idStall_226_T = ~idle_226; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_226_T_3 = _idStall_226_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_226 = _idStall_226_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_227; // @[ToAXI4.scala:272:28]
wire _idStall_227_T_2 = count_227; // @[ToAXI4.scala:272:28, :286:44]
reg write_227; // @[ToAXI4.scala:273:24]
wire idle_227 = ~count_227; // @[ToAXI4.scala:272:28, :274:26]
wire inc_227 = a_sel_227 & _inc_T_227; // @[Decoupled.scala:51:35]
wire _dec_T_454 = d_sel_227 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_227 = _dec_T_454 & _dec_T_455; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_908 = {1'h0, count_227} + {1'h0, inc_227}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_909 = _count_T_908[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_910 = {1'h0, _count_T_909} - {1'h0, dec_227}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_911 = _count_T_910[0]; // @[ToAXI4.scala:278:37]
wire _idStall_227_T = ~idle_227; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_227_T_3 = _idStall_227_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_227 = _idStall_227_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_228; // @[ToAXI4.scala:272:28]
wire _idStall_228_T_2 = count_228; // @[ToAXI4.scala:272:28, :286:44]
reg write_228; // @[ToAXI4.scala:273:24]
wire idle_228 = ~count_228; // @[ToAXI4.scala:272:28, :274:26]
wire inc_228 = a_sel_228 & _inc_T_228; // @[Decoupled.scala:51:35]
wire _dec_T_456 = d_sel_228 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_228 = _dec_T_456 & _dec_T_457; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_912 = {1'h0, count_228} + {1'h0, inc_228}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_913 = _count_T_912[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_914 = {1'h0, _count_T_913} - {1'h0, dec_228}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_915 = _count_T_914[0]; // @[ToAXI4.scala:278:37]
wire _idStall_228_T = ~idle_228; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_228_T_3 = _idStall_228_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_228 = _idStall_228_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_229; // @[ToAXI4.scala:272:28]
wire _idStall_229_T_2 = count_229; // @[ToAXI4.scala:272:28, :286:44]
reg write_229; // @[ToAXI4.scala:273:24]
wire idle_229 = ~count_229; // @[ToAXI4.scala:272:28, :274:26]
wire inc_229 = a_sel_229 & _inc_T_229; // @[Decoupled.scala:51:35]
wire _dec_T_458 = d_sel_229 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_229 = _dec_T_458 & _dec_T_459; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_916 = {1'h0, count_229} + {1'h0, inc_229}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_917 = _count_T_916[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_918 = {1'h0, _count_T_917} - {1'h0, dec_229}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_919 = _count_T_918[0]; // @[ToAXI4.scala:278:37]
wire _idStall_229_T = ~idle_229; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_229_T_3 = _idStall_229_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_229 = _idStall_229_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_230; // @[ToAXI4.scala:272:28]
wire _idStall_230_T_2 = count_230; // @[ToAXI4.scala:272:28, :286:44]
reg write_230; // @[ToAXI4.scala:273:24]
wire idle_230 = ~count_230; // @[ToAXI4.scala:272:28, :274:26]
wire inc_230 = a_sel_230 & _inc_T_230; // @[Decoupled.scala:51:35]
wire _dec_T_460 = d_sel_230 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_230 = _dec_T_460 & _dec_T_461; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_920 = {1'h0, count_230} + {1'h0, inc_230}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_921 = _count_T_920[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_922 = {1'h0, _count_T_921} - {1'h0, dec_230}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_923 = _count_T_922[0]; // @[ToAXI4.scala:278:37]
wire _idStall_230_T = ~idle_230; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_230_T_3 = _idStall_230_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_230 = _idStall_230_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_231; // @[ToAXI4.scala:272:28]
wire _idStall_231_T_2 = count_231; // @[ToAXI4.scala:272:28, :286:44]
reg write_231; // @[ToAXI4.scala:273:24]
wire idle_231 = ~count_231; // @[ToAXI4.scala:272:28, :274:26]
wire inc_231 = a_sel_231 & _inc_T_231; // @[Decoupled.scala:51:35]
wire _dec_T_462 = d_sel_231 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_231 = _dec_T_462 & _dec_T_463; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_924 = {1'h0, count_231} + {1'h0, inc_231}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_925 = _count_T_924[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_926 = {1'h0, _count_T_925} - {1'h0, dec_231}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_927 = _count_T_926[0]; // @[ToAXI4.scala:278:37]
wire _idStall_231_T = ~idle_231; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_231_T_3 = _idStall_231_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_231 = _idStall_231_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_232; // @[ToAXI4.scala:272:28]
wire _idStall_232_T_2 = count_232; // @[ToAXI4.scala:272:28, :286:44]
reg write_232; // @[ToAXI4.scala:273:24]
wire idle_232 = ~count_232; // @[ToAXI4.scala:272:28, :274:26]
wire inc_232 = a_sel_232 & _inc_T_232; // @[Decoupled.scala:51:35]
wire _dec_T_464 = d_sel_232 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_232 = _dec_T_464 & _dec_T_465; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_928 = {1'h0, count_232} + {1'h0, inc_232}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_929 = _count_T_928[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_930 = {1'h0, _count_T_929} - {1'h0, dec_232}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_931 = _count_T_930[0]; // @[ToAXI4.scala:278:37]
wire _idStall_232_T = ~idle_232; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_232_T_3 = _idStall_232_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_232 = _idStall_232_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_233; // @[ToAXI4.scala:272:28]
wire _idStall_233_T_2 = count_233; // @[ToAXI4.scala:272:28, :286:44]
reg write_233; // @[ToAXI4.scala:273:24]
wire idle_233 = ~count_233; // @[ToAXI4.scala:272:28, :274:26]
wire inc_233 = a_sel_233 & _inc_T_233; // @[Decoupled.scala:51:35]
wire _dec_T_466 = d_sel_233 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_233 = _dec_T_466 & _dec_T_467; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_932 = {1'h0, count_233} + {1'h0, inc_233}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_933 = _count_T_932[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_934 = {1'h0, _count_T_933} - {1'h0, dec_233}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_935 = _count_T_934[0]; // @[ToAXI4.scala:278:37]
wire _idStall_233_T = ~idle_233; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_233_T_3 = _idStall_233_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_233 = _idStall_233_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_234; // @[ToAXI4.scala:272:28]
wire _idStall_234_T_2 = count_234; // @[ToAXI4.scala:272:28, :286:44]
reg write_234; // @[ToAXI4.scala:273:24]
wire idle_234 = ~count_234; // @[ToAXI4.scala:272:28, :274:26]
wire inc_234 = a_sel_234 & _inc_T_234; // @[Decoupled.scala:51:35]
wire _dec_T_468 = d_sel_234 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_234 = _dec_T_468 & _dec_T_469; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_936 = {1'h0, count_234} + {1'h0, inc_234}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_937 = _count_T_936[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_938 = {1'h0, _count_T_937} - {1'h0, dec_234}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_939 = _count_T_938[0]; // @[ToAXI4.scala:278:37]
wire _idStall_234_T = ~idle_234; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_234_T_3 = _idStall_234_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_234 = _idStall_234_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_235; // @[ToAXI4.scala:272:28]
wire _idStall_235_T_2 = count_235; // @[ToAXI4.scala:272:28, :286:44]
reg write_235; // @[ToAXI4.scala:273:24]
wire idle_235 = ~count_235; // @[ToAXI4.scala:272:28, :274:26]
wire inc_235 = a_sel_235 & _inc_T_235; // @[Decoupled.scala:51:35]
wire _dec_T_470 = d_sel_235 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_235 = _dec_T_470 & _dec_T_471; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_940 = {1'h0, count_235} + {1'h0, inc_235}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_941 = _count_T_940[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_942 = {1'h0, _count_T_941} - {1'h0, dec_235}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_943 = _count_T_942[0]; // @[ToAXI4.scala:278:37]
wire _idStall_235_T = ~idle_235; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_235_T_3 = _idStall_235_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_235 = _idStall_235_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_236; // @[ToAXI4.scala:272:28]
wire _idStall_236_T_2 = count_236; // @[ToAXI4.scala:272:28, :286:44]
reg write_236; // @[ToAXI4.scala:273:24]
wire idle_236 = ~count_236; // @[ToAXI4.scala:272:28, :274:26]
wire inc_236 = a_sel_236 & _inc_T_236; // @[Decoupled.scala:51:35]
wire _dec_T_472 = d_sel_236 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_236 = _dec_T_472 & _dec_T_473; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_944 = {1'h0, count_236} + {1'h0, inc_236}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_945 = _count_T_944[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_946 = {1'h0, _count_T_945} - {1'h0, dec_236}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_947 = _count_T_946[0]; // @[ToAXI4.scala:278:37]
wire _idStall_236_T = ~idle_236; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_236_T_3 = _idStall_236_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_236 = _idStall_236_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_237; // @[ToAXI4.scala:272:28]
wire _idStall_237_T_2 = count_237; // @[ToAXI4.scala:272:28, :286:44]
reg write_237; // @[ToAXI4.scala:273:24]
wire idle_237 = ~count_237; // @[ToAXI4.scala:272:28, :274:26]
wire inc_237 = a_sel_237 & _inc_T_237; // @[Decoupled.scala:51:35]
wire _dec_T_474 = d_sel_237 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_237 = _dec_T_474 & _dec_T_475; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_948 = {1'h0, count_237} + {1'h0, inc_237}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_949 = _count_T_948[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_950 = {1'h0, _count_T_949} - {1'h0, dec_237}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_951 = _count_T_950[0]; // @[ToAXI4.scala:278:37]
wire _idStall_237_T = ~idle_237; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_237_T_3 = _idStall_237_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_237 = _idStall_237_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_238; // @[ToAXI4.scala:272:28]
wire _idStall_238_T_2 = count_238; // @[ToAXI4.scala:272:28, :286:44]
reg write_238; // @[ToAXI4.scala:273:24]
wire idle_238 = ~count_238; // @[ToAXI4.scala:272:28, :274:26]
wire inc_238 = a_sel_238 & _inc_T_238; // @[Decoupled.scala:51:35]
wire _dec_T_476 = d_sel_238 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_238 = _dec_T_476 & _dec_T_477; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_952 = {1'h0, count_238} + {1'h0, inc_238}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_953 = _count_T_952[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_954 = {1'h0, _count_T_953} - {1'h0, dec_238}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_955 = _count_T_954[0]; // @[ToAXI4.scala:278:37]
wire _idStall_238_T = ~idle_238; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_238_T_3 = _idStall_238_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_238 = _idStall_238_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_239; // @[ToAXI4.scala:272:28]
wire _idStall_239_T_2 = count_239; // @[ToAXI4.scala:272:28, :286:44]
reg write_239; // @[ToAXI4.scala:273:24]
wire idle_239 = ~count_239; // @[ToAXI4.scala:272:28, :274:26]
wire inc_239 = a_sel_239 & _inc_T_239; // @[Decoupled.scala:51:35]
wire _dec_T_478 = d_sel_239 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_239 = _dec_T_478 & _dec_T_479; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_956 = {1'h0, count_239} + {1'h0, inc_239}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_957 = _count_T_956[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_958 = {1'h0, _count_T_957} - {1'h0, dec_239}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_959 = _count_T_958[0]; // @[ToAXI4.scala:278:37]
wire _idStall_239_T = ~idle_239; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_239_T_3 = _idStall_239_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_239 = _idStall_239_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_240; // @[ToAXI4.scala:272:28]
wire _idStall_240_T_2 = count_240; // @[ToAXI4.scala:272:28, :286:44]
reg write_240; // @[ToAXI4.scala:273:24]
wire idle_240 = ~count_240; // @[ToAXI4.scala:272:28, :274:26]
wire inc_240 = a_sel_240 & _inc_T_240; // @[Decoupled.scala:51:35]
wire _dec_T_480 = d_sel_240 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_240 = _dec_T_480 & _dec_T_481; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_960 = {1'h0, count_240} + {1'h0, inc_240}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_961 = _count_T_960[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_962 = {1'h0, _count_T_961} - {1'h0, dec_240}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_963 = _count_T_962[0]; // @[ToAXI4.scala:278:37]
wire _idStall_240_T = ~idle_240; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_240_T_3 = _idStall_240_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_240 = _idStall_240_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_241; // @[ToAXI4.scala:272:28]
wire _idStall_241_T_2 = count_241; // @[ToAXI4.scala:272:28, :286:44]
reg write_241; // @[ToAXI4.scala:273:24]
wire idle_241 = ~count_241; // @[ToAXI4.scala:272:28, :274:26]
wire inc_241 = a_sel_241 & _inc_T_241; // @[Decoupled.scala:51:35]
wire _dec_T_482 = d_sel_241 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_241 = _dec_T_482 & _dec_T_483; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_964 = {1'h0, count_241} + {1'h0, inc_241}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_965 = _count_T_964[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_966 = {1'h0, _count_T_965} - {1'h0, dec_241}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_967 = _count_T_966[0]; // @[ToAXI4.scala:278:37]
wire _idStall_241_T = ~idle_241; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_241_T_3 = _idStall_241_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_241 = _idStall_241_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_242; // @[ToAXI4.scala:272:28]
wire _idStall_242_T_2 = count_242; // @[ToAXI4.scala:272:28, :286:44]
reg write_242; // @[ToAXI4.scala:273:24]
wire idle_242 = ~count_242; // @[ToAXI4.scala:272:28, :274:26]
wire inc_242 = a_sel_242 & _inc_T_242; // @[Decoupled.scala:51:35]
wire _dec_T_484 = d_sel_242 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_242 = _dec_T_484 & _dec_T_485; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_968 = {1'h0, count_242} + {1'h0, inc_242}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_969 = _count_T_968[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_970 = {1'h0, _count_T_969} - {1'h0, dec_242}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_971 = _count_T_970[0]; // @[ToAXI4.scala:278:37]
wire _idStall_242_T = ~idle_242; // @[ToAXI4.scala:274:26, :286:15]
assign _idStall_242_T_3 = _idStall_242_T_2; // @[ToAXI4.scala:286:{34,44}]
assign idStall_242 = _idStall_242_T_3; // @[ToAXI4.scala:115:32, :286:34]
reg count_243; // @[ToAXI4.scala:272:28]
wire _idStall_243_T_2 = count_243; // @[ToAXI4.scala:272:28, :286:44]
reg write_243; // @[ToAXI4.scala:273:24]
wire idle_243 = ~count_243; // @[ToAXI4.scala:272:28, :274:26]
wire inc_243 = a_sel_243 & _inc_T_243; // @[Decoupled.scala:51:35]
wire _dec_T_486 = d_sel_243 & d_last; // @[ToAXI4.scala:261:93, :262:23, :277:22]
wire dec_243 = _dec_T_486 & _dec_T_487; // @[Decoupled.scala:51:35]
wire [1:0] _count_T_972 = {1'h0, count_243} + {1'h0, inc_243}; // @[ToAXI4.scala:272:28, :276:22, :278:24]
wire _count_T_973 = _count_T_972[0]; // @[ToAXI4.scala:278:24]
wire [1:0] _count_T_974 = {1'h0, _count_T_973} - {1'h0, dec_243}; // @[ToAXI4.scala:277:32, :278:{24,37}]
wire _count_T_975 = _count_T_974[0]; // @[ToAXI4.scala:278:37] |
Generate the Verilog code corresponding to the following Chisel files.
File IngressUnit.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
class IngressUnit(
ingressNodeId: Int,
cParam: IngressChannelParams,
outParams: Seq[ChannelParams],
egressParams: Seq[EgressChannelParams],
combineRCVA: Boolean,
combineSAST: Boolean,
)
(implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) {
class IngressUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) {
val in = Flipped(Decoupled(new IngressFlit(cParam.payloadBits)))
}
val io = IO(new IngressUnitIO)
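  // Route computation stage: buffer incoming flits and request a route for each head
  // flit; flits whose egress is on this node bypass the route computer and select the
  // local egress virtual channel directly.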
val route_buffer = Module(new Queue(new Flit(cParam.payloadBits), 2))
val route_q = Module(new Queue(new RouteComputerResp(outParams, egressParams), 2,
flow=combineRCVA))
assert(!(io.in.valid && !cParam.possibleFlows.toSeq.map(_.egressId.U === io.in.bits.egress_id).orR))
route_buffer.io.enq.bits.head := io.in.bits.head
route_buffer.io.enq.bits.tail := io.in.bits.tail
val flows = cParam.possibleFlows.toSeq
if (flows.size == 0) {
route_buffer.io.enq.bits.flow := DontCare
} else {
route_buffer.io.enq.bits.flow.ingress_node := cParam.destId.U
route_buffer.io.enq.bits.flow.ingress_node_id := ingressNodeId.U
route_buffer.io.enq.bits.flow.vnet_id := cParam.vNetId.U
route_buffer.io.enq.bits.flow.egress_node := Mux1H(
flows.map(_.egressId.U === io.in.bits.egress_id),
flows.map(_.egressNode.U)
)
route_buffer.io.enq.bits.flow.egress_node_id := Mux1H(
flows.map(_.egressId.U === io.in.bits.egress_id),
flows.map(_.egressNodeId.U)
)
}
route_buffer.io.enq.bits.payload := io.in.bits.payload
route_buffer.io.enq.bits.virt_channel_id := DontCare
io.router_req.bits.src_virt_id := 0.U
io.router_req.bits.flow := route_buffer.io.enq.bits.flow
val at_dest = route_buffer.io.enq.bits.flow.egress_node === nodeId.U
route_buffer.io.enq.valid := io.in.valid && (
io.router_req.ready || !io.in.bits.head || at_dest)
io.router_req.valid := io.in.valid && route_buffer.io.enq.ready && io.in.bits.head && !at_dest
io.in.ready := route_buffer.io.enq.ready && (
io.router_req.ready || !io.in.bits.head || at_dest)
route_q.io.enq.valid := io.router_req.fire
route_q.io.enq.bits := io.router_resp
when (io.in.fire && io.in.bits.head && at_dest) {
route_q.io.enq.valid := true.B
route_q.io.enq.bits.vc_sel.foreach(_.foreach(_ := false.B))
for (o <- 0 until nEgress) {
when (egressParams(o).egressId.U === io.in.bits.egress_id) {
route_q.io.enq.bits.vc_sel(o+nOutputs)(0) := true.B
}
}
}
assert(!(route_q.io.enq.valid && !route_q.io.enq.ready))
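  // VC allocation stage: forward routed flits and request an output virtual channel for
  // each head flit; the allocation response is held in vcalloc_q until the tail flit departs.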
val vcalloc_buffer = Module(new Queue(new Flit(cParam.payloadBits), 2))
val vcalloc_q = Module(new Queue(new VCAllocResp(outParams, egressParams),
1, pipe=true))
vcalloc_buffer.io.enq.bits := route_buffer.io.deq.bits
io.vcalloc_req.bits.vc_sel := route_q.io.deq.bits.vc_sel
io.vcalloc_req.bits.flow := route_buffer.io.deq.bits.flow
io.vcalloc_req.bits.in_vc := 0.U
val head = route_buffer.io.deq.bits.head
val tail = route_buffer.io.deq.bits.tail
vcalloc_buffer.io.enq.valid := (route_buffer.io.deq.valid &&
(route_q.io.deq.valid || !head) &&
(io.vcalloc_req.ready || !head)
)
io.vcalloc_req.valid := (route_buffer.io.deq.valid && route_q.io.deq.valid &&
head && vcalloc_buffer.io.enq.ready && vcalloc_q.io.enq.ready)
route_buffer.io.deq.ready := (vcalloc_buffer.io.enq.ready &&
(route_q.io.deq.valid || !head) &&
(io.vcalloc_req.ready || !head) &&
(vcalloc_q.io.enq.ready || !head))
route_q.io.deq.ready := (route_buffer.io.deq.fire && tail)
vcalloc_q.io.enq.valid := io.vcalloc_req.fire
vcalloc_q.io.enq.bits := io.vcalloc_resp
assert(!(vcalloc_q.io.enq.valid && !vcalloc_q.io.enq.ready))
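  // Switch allocation stage: request the crossbar only when the allocated output virtual
  // channel has a credit available and the unit is not blocked.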
io.salloc_req(0).bits.vc_sel := vcalloc_q.io.deq.bits.vc_sel
io.salloc_req(0).bits.tail := vcalloc_buffer.io.deq.bits.tail
val c = (vcalloc_q.io.deq.bits.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U
val vcalloc_tail = vcalloc_buffer.io.deq.bits.tail
io.salloc_req(0).valid := vcalloc_buffer.io.deq.valid && vcalloc_q.io.deq.valid && c && !io.block
vcalloc_buffer.io.deq.ready := io.salloc_req(0).ready && vcalloc_q.io.deq.valid && c && !io.block
vcalloc_q.io.deq.ready := vcalloc_tail && vcalloc_buffer.io.deq.fire
val out_bundle = if (combineSAST) {
Wire(Valid(new SwitchBundle(outParams, egressParams)))
} else {
Reg(Valid(new SwitchBundle(outParams, egressParams)))
}
io.out(0) := out_bundle
out_bundle.valid := vcalloc_buffer.io.deq.fire
out_bundle.bits.flit := vcalloc_buffer.io.deq.bits
out_bundle.bits.flit.virt_channel_id := 0.U
val out_channel_oh = vcalloc_q.io.deq.bits.vc_sel.map(_.reduce(_||_)).toSeq
out_bundle.bits.out_virt_channel := Mux1H(out_channel_oh,
vcalloc_q.io.deq.bits.vc_sel.map(v => OHToUInt(v)).toSeq)
io.debug.va_stall := io.vcalloc_req.valid && !io.vcalloc_req.ready
io.debug.sa_stall := io.salloc_req(0).valid && !io.salloc_req(0).ready
// TODO: We should not generate input/ingress/output/egress units for untraversable channels
if (!cParam.traversable) {
io.in.ready := false.B
io.router_req.valid := false.B
io.router_req.bits := DontCare
io.vcalloc_req.valid := false.B
io.vcalloc_req.bits := DontCare
io.salloc_req.foreach(_.valid := false.B)
io.salloc_req.foreach(_.bits := DontCare)
io.out.foreach(_.valid := false.B)
io.out.foreach(_.bits := DontCare)
}
}
| module IngressUnit_38( // @[IngressUnit.scala:11:7]
input clock, // @[IngressUnit.scala:11:7]
input reset, // @[IngressUnit.scala:11:7]
output [3:0] io_router_req_bits_flow_egress_node, // @[IngressUnit.scala:24:14]
output [1:0] io_router_req_bits_flow_egress_node_id, // @[IngressUnit.scala:24:14]
input io_router_resp_vc_sel_3_0, // @[IngressUnit.scala:24:14]
input io_router_resp_vc_sel_3_1, // @[IngressUnit.scala:24:14]
input io_router_resp_vc_sel_2_0, // @[IngressUnit.scala:24:14]
input io_router_resp_vc_sel_2_1, // @[IngressUnit.scala:24:14]
input io_router_resp_vc_sel_1_0, // @[IngressUnit.scala:24:14]
input io_router_resp_vc_sel_1_1, // @[IngressUnit.scala:24:14]
input io_router_resp_vc_sel_0_0, // @[IngressUnit.scala:24:14]
input io_router_resp_vc_sel_0_1, // @[IngressUnit.scala:24:14]
input io_vcalloc_req_ready, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_valid, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_4_0, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_3_0, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_3_1, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_2_0, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_2_1, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_1_0, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_1_1, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_0, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_1, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_4_0, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_3_0, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_3_1, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_2_0, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_2_1, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_1_0, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_1_1, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_0, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_1, // @[IngressUnit.scala:24:14]
input io_out_credit_available_4_0, // @[IngressUnit.scala:24:14]
input io_out_credit_available_3_0, // @[IngressUnit.scala:24:14]
input io_out_credit_available_3_1, // @[IngressUnit.scala:24:14]
input io_out_credit_available_2_0, // @[IngressUnit.scala:24:14]
input io_out_credit_available_2_1, // @[IngressUnit.scala:24:14]
input io_out_credit_available_1_1, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_1, // @[IngressUnit.scala:24:14]
input io_salloc_req_0_ready, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_valid, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_4_0, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_3_0, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_3_1, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_2_0, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_2_1, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_1_0, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_1_1, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_0, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_1, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_tail, // @[IngressUnit.scala:24:14]
output io_out_0_valid, // @[IngressUnit.scala:24:14]
output io_out_0_bits_flit_head, // @[IngressUnit.scala:24:14]
output io_out_0_bits_flit_tail, // @[IngressUnit.scala:24:14]
output [36:0] io_out_0_bits_flit_payload, // @[IngressUnit.scala:24:14]
output io_out_0_bits_flit_flow_vnet_id, // @[IngressUnit.scala:24:14]
output [3:0] io_out_0_bits_flit_flow_ingress_node, // @[IngressUnit.scala:24:14]
output [1:0] io_out_0_bits_flit_flow_ingress_node_id, // @[IngressUnit.scala:24:14]
output [3:0] io_out_0_bits_flit_flow_egress_node, // @[IngressUnit.scala:24:14]
output [1:0] io_out_0_bits_flit_flow_egress_node_id, // @[IngressUnit.scala:24:14]
output io_out_0_bits_out_virt_channel, // @[IngressUnit.scala:24:14]
output io_in_ready, // @[IngressUnit.scala:24:14]
input io_in_valid, // @[IngressUnit.scala:24:14]
input io_in_bits_head, // @[IngressUnit.scala:24:14]
input io_in_bits_tail, // @[IngressUnit.scala:24:14]
input [36:0] io_in_bits_payload, // @[IngressUnit.scala:24:14]
input [3:0] io_in_bits_egress_id // @[IngressUnit.scala:24:14]
);
wire _vcalloc_q_io_enq_ready; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_valid; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_4_0; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_3_0; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_3_1; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_2_0; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_2_1; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_1_0; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_1_1; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_0; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_1; // @[IngressUnit.scala:76:25]
wire _vcalloc_buffer_io_enq_ready; // @[IngressUnit.scala:75:30]
wire _vcalloc_buffer_io_deq_valid; // @[IngressUnit.scala:75:30]
wire _vcalloc_buffer_io_deq_bits_tail; // @[IngressUnit.scala:75:30]
wire _route_q_io_enq_ready; // @[IngressUnit.scala:27:23]
wire _route_q_io_deq_valid; // @[IngressUnit.scala:27:23]
wire _route_buffer_io_enq_ready; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_deq_valid; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_deq_bits_head; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_deq_bits_tail; // @[IngressUnit.scala:26:28]
wire [36:0] _route_buffer_io_deq_bits_payload; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_deq_bits_flow_vnet_id; // @[IngressUnit.scala:26:28]
wire [3:0] _route_buffer_io_deq_bits_flow_ingress_node; // @[IngressUnit.scala:26:28]
wire [1:0] _route_buffer_io_deq_bits_flow_ingress_node_id; // @[IngressUnit.scala:26:28]
wire [3:0] _route_buffer_io_deq_bits_flow_egress_node; // @[IngressUnit.scala:26:28]
wire [1:0] _route_buffer_io_deq_bits_flow_egress_node_id; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_deq_bits_virt_channel_id; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T = io_in_bits_egress_id == 4'h7; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_1 = io_in_bits_egress_id == 4'h4; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_2 = io_in_bits_egress_id == 4'hB; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_3 = io_in_bits_egress_id == 4'h3; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_4 = io_in_bits_egress_id == 4'h8; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_5 = io_in_bits_egress_id == 4'hA; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_6 = io_in_bits_egress_id == 4'hC; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_7 = io_in_bits_egress_id == 4'h5; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_8 = io_in_bits_egress_id == 4'h2; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_34 = io_in_bits_egress_id == 4'h1; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_10 = io_in_bits_egress_id == 4'h9; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_11 = io_in_bits_egress_id == 4'h6; // @[IngressUnit.scala:30:72]
wire [3:0] _route_buffer_io_enq_bits_flow_egress_node_T_25 = {_route_buffer_io_enq_bits_flow_egress_node_id_T, 1'h0, {2{_route_buffer_io_enq_bits_flow_egress_node_id_T_1}}} | (_route_buffer_io_enq_bits_flow_egress_node_id_T_2 ? 4'hE : 4'h0); // @[Mux.scala:30:73]
wire [3:0] _route_buffer_io_enq_bits_flow_egress_node_T_29 = {_route_buffer_io_enq_bits_flow_egress_node_T_25[3:2], _route_buffer_io_enq_bits_flow_egress_node_T_25[1:0] | {_route_buffer_io_enq_bits_flow_egress_node_id_T_3, 1'h0}} | (_route_buffer_io_enq_bits_flow_egress_node_id_T_4 ? 4'hB : 4'h0) | (_route_buffer_io_enq_bits_flow_egress_node_id_T_5 ? 4'hD : 4'h0) | {4{_route_buffer_io_enq_bits_flow_egress_node_id_T_6}}; // @[Mux.scala:30:73]
wire [2:0] _GEN = _route_buffer_io_enq_bits_flow_egress_node_T_29[2:0] | {_route_buffer_io_enq_bits_flow_egress_node_id_T_7, 2'h0}; // @[Mux.scala:30:73]
wire [3:0] _route_buffer_io_enq_bits_flow_egress_node_T_33 = {_route_buffer_io_enq_bits_flow_egress_node_T_29[3], _GEN[2:1], _GEN[0] | _route_buffer_io_enq_bits_flow_egress_node_id_T_8} | (_route_buffer_io_enq_bits_flow_egress_node_id_T_10 ? 4'hC : 4'h0); // @[Mux.scala:30:73]
wire [3:0] _route_buffer_io_enq_bits_flow_egress_node_T_34 = {_route_buffer_io_enq_bits_flow_egress_node_T_33[3], _route_buffer_io_enq_bits_flow_egress_node_T_33[2:0] | {3{_route_buffer_io_enq_bits_flow_egress_node_id_T_11}}}; // @[Mux.scala:30:73]
wire [1:0] route_buffer_io_enq_bits_flow_egress_node_id = {1'h0, _route_buffer_io_enq_bits_flow_egress_node_id_T_34}; // @[IngressUnit.scala:30:72, :45:50]
wire _GEN_0 = _route_buffer_io_enq_ready & io_in_valid & io_in_bits_head & _route_buffer_io_enq_bits_flow_egress_node_T_34 == 4'h5; // @[Mux.scala:30:73]
wire route_q_io_enq_valid = _GEN_0 | io_in_valid & _route_buffer_io_enq_ready & io_in_bits_head & _route_buffer_io_enq_bits_flow_egress_node_T_34 != 4'h5; // @[Mux.scala:30:73]
wire io_vcalloc_req_valid_0 = _route_buffer_io_deq_valid & _route_q_io_deq_valid & _route_buffer_io_deq_bits_head & _vcalloc_buffer_io_enq_ready & _vcalloc_q_io_enq_ready; // @[IngressUnit.scala:26:28, :27:23, :75:30, :76:25, :91:{54,78}, :92:{10,41}]
wire route_buffer_io_deq_ready = _vcalloc_buffer_io_enq_ready & (_route_q_io_deq_valid | ~_route_buffer_io_deq_bits_head) & (io_vcalloc_req_ready | ~_route_buffer_io_deq_bits_head) & (_vcalloc_q_io_enq_ready | ~_route_buffer_io_deq_bits_head); // @[IngressUnit.scala:26:28, :27:23, :75:30, :76:25, :88:30, :93:61, :94:{27,37}, :95:{27,37}, :96:29]
wire vcalloc_q_io_enq_valid = io_vcalloc_req_ready & io_vcalloc_req_valid_0; // @[Decoupled.scala:51:35] |
Generate the Verilog code corresponding to the following Chisel files.
File MulRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (ported from Verilog to
Chisel by Andrew Waterman).
Copyright 2019, 2020 The Regents of the University of California. All rights
reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulFullRawFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule
{
val io = IO(new Bundle {
val a = Input(new RawFloat(expWidth, sigWidth))
val b = Input(new RawFloat(expWidth, sigWidth))
val invalidExc = Output(Bool())
val rawOut = Output(new RawFloat(expWidth, sigWidth*2 - 1))
})
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
val notSigNaN_invalidExc = (io.a.isInf && io.b.isZero) || (io.a.isZero && io.b.isInf)
val notNaN_isInfOut = io.a.isInf || io.b.isInf
val notNaN_isZeroOut = io.a.isZero || io.b.isZero
val notNaN_signOut = io.a.sign ^ io.b.sign
val common_sExpOut = io.a.sExp + io.b.sExp - (1<<expWidth).S
val common_sigOut = (io.a.sig * io.b.sig)(sigWidth*2 - 1, 0)
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
io.invalidExc := isSigNaNRawFloat(io.a) || isSigNaNRawFloat(io.b) || notSigNaN_invalidExc
io.rawOut.isInf := notNaN_isInfOut
io.rawOut.isZero := notNaN_isZeroOut
io.rawOut.sExp := common_sExpOut
io.rawOut.isNaN := io.a.isNaN || io.b.isNaN
io.rawOut.sign := notNaN_signOut
io.rawOut.sig := common_sigOut
}
class MulRawFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule
{
val io = IO(new Bundle {
val a = Input(new RawFloat(expWidth, sigWidth))
val b = Input(new RawFloat(expWidth, sigWidth))
val invalidExc = Output(Bool())
val rawOut = Output(new RawFloat(expWidth, sigWidth + 2))
})
val mulFullRaw = Module(new MulFullRawFN(expWidth, sigWidth))
mulFullRaw.io.a := io.a
mulFullRaw.io.b := io.b
io.invalidExc := mulFullRaw.io.invalidExc
io.rawOut := mulFullRaw.io.rawOut
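    // Descriptive note: the full product from MulFullRawFN is 2*sigWidth bits wide;
    // keep the top sigWidth+2 bits and OR the dropped low-order bits into a single
    // sticky bit so the subsequent rounding step still sees any lost precision.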
io.rawOut.sig := {
val sig = mulFullRaw.io.rawOut.sig
Cat(sig >> (sigWidth - 2), sig(sigWidth - 3, 0).orR)
}
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulRecFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule
{
val io = IO(new Bundle {
val a = Input(UInt((expWidth + sigWidth + 1).W))
val b = Input(UInt((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(Bool())
val out = Output(UInt((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(UInt(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val mulRawFN = Module(new MulRawFN(expWidth, sigWidth))
mulRawFN.io.a := rawFloatFromRecFN(expWidth, sigWidth, io.a)
mulRawFN.io.b := rawFloatFromRecFN(expWidth, sigWidth, io.b)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundRawFNToRecFN =
Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0))
roundRawFNToRecFN.io.invalidExc := mulRawFN.io.invalidExc
roundRawFNToRecFN.io.infiniteExc := false.B
roundRawFNToRecFN.io.in := mulRawFN.io.rawOut
roundRawFNToRecFN.io.roundingMode := io.roundingMode
roundRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundRawFNToRecFN.io.out
io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
}
| module MulFullRawFN_20( // @[MulRecFN.scala:47:7]
input io_a_isNaN, // @[MulRecFN.scala:49:16]
input io_a_isInf, // @[MulRecFN.scala:49:16]
input io_a_isZero, // @[MulRecFN.scala:49:16]
input io_a_sign, // @[MulRecFN.scala:49:16]
input [9:0] io_a_sExp, // @[MulRecFN.scala:49:16]
input [24:0] io_a_sig, // @[MulRecFN.scala:49:16]
input io_b_isNaN, // @[MulRecFN.scala:49:16]
input io_b_isInf, // @[MulRecFN.scala:49:16]
input io_b_isZero, // @[MulRecFN.scala:49:16]
input io_b_sign, // @[MulRecFN.scala:49:16]
input [9:0] io_b_sExp, // @[MulRecFN.scala:49:16]
input [24:0] io_b_sig, // @[MulRecFN.scala:49:16]
output io_invalidExc, // @[MulRecFN.scala:49:16]
output io_rawOut_isNaN, // @[MulRecFN.scala:49:16]
output io_rawOut_isInf, // @[MulRecFN.scala:49:16]
output io_rawOut_isZero, // @[MulRecFN.scala:49:16]
output io_rawOut_sign, // @[MulRecFN.scala:49:16]
output [9:0] io_rawOut_sExp, // @[MulRecFN.scala:49:16]
output [47:0] io_rawOut_sig // @[MulRecFN.scala:49:16]
);
wire io_a_isNaN_0 = io_a_isNaN; // @[MulRecFN.scala:47:7]
wire io_a_isInf_0 = io_a_isInf; // @[MulRecFN.scala:47:7]
wire io_a_isZero_0 = io_a_isZero; // @[MulRecFN.scala:47:7]
wire io_a_sign_0 = io_a_sign; // @[MulRecFN.scala:47:7]
wire [9:0] io_a_sExp_0 = io_a_sExp; // @[MulRecFN.scala:47:7]
wire [24:0] io_a_sig_0 = io_a_sig; // @[MulRecFN.scala:47:7]
wire io_b_isNaN_0 = io_b_isNaN; // @[MulRecFN.scala:47:7]
wire io_b_isInf_0 = io_b_isInf; // @[MulRecFN.scala:47:7]
wire io_b_isZero_0 = io_b_isZero; // @[MulRecFN.scala:47:7]
wire io_b_sign_0 = io_b_sign; // @[MulRecFN.scala:47:7]
wire [9:0] io_b_sExp_0 = io_b_sExp; // @[MulRecFN.scala:47:7]
wire [24:0] io_b_sig_0 = io_b_sig; // @[MulRecFN.scala:47:7]
wire _io_invalidExc_T_7; // @[MulRecFN.scala:66:71]
wire _io_rawOut_isNaN_T; // @[MulRecFN.scala:70:35]
wire notNaN_isInfOut; // @[MulRecFN.scala:59:38]
wire notNaN_isZeroOut; // @[MulRecFN.scala:60:40]
wire notNaN_signOut; // @[MulRecFN.scala:61:36]
wire [9:0] common_sExpOut; // @[MulRecFN.scala:62:48]
wire [47:0] common_sigOut; // @[MulRecFN.scala:63:46]
wire io_rawOut_isNaN_0; // @[MulRecFN.scala:47:7]
wire io_rawOut_isInf_0; // @[MulRecFN.scala:47:7]
wire io_rawOut_isZero_0; // @[MulRecFN.scala:47:7]
wire io_rawOut_sign_0; // @[MulRecFN.scala:47:7]
wire [9:0] io_rawOut_sExp_0; // @[MulRecFN.scala:47:7]
wire [47:0] io_rawOut_sig_0; // @[MulRecFN.scala:47:7]
wire io_invalidExc_0; // @[MulRecFN.scala:47:7]
wire _notSigNaN_invalidExc_T = io_a_isInf_0 & io_b_isZero_0; // @[MulRecFN.scala:47:7, :58:44]
wire _notSigNaN_invalidExc_T_1 = io_a_isZero_0 & io_b_isInf_0; // @[MulRecFN.scala:47:7, :58:76]
wire notSigNaN_invalidExc = _notSigNaN_invalidExc_T | _notSigNaN_invalidExc_T_1; // @[MulRecFN.scala:58:{44,60,76}]
assign notNaN_isInfOut = io_a_isInf_0 | io_b_isInf_0; // @[MulRecFN.scala:47:7, :59:38]
assign io_rawOut_isInf_0 = notNaN_isInfOut; // @[MulRecFN.scala:47:7, :59:38]
assign notNaN_isZeroOut = io_a_isZero_0 | io_b_isZero_0; // @[MulRecFN.scala:47:7, :60:40]
assign io_rawOut_isZero_0 = notNaN_isZeroOut; // @[MulRecFN.scala:47:7, :60:40]
assign notNaN_signOut = io_a_sign_0 ^ io_b_sign_0; // @[MulRecFN.scala:47:7, :61:36]
assign io_rawOut_sign_0 = notNaN_signOut; // @[MulRecFN.scala:47:7, :61:36]
wire [10:0] _common_sExpOut_T = {io_a_sExp_0[9], io_a_sExp_0} + {io_b_sExp_0[9], io_b_sExp_0}; // @[MulRecFN.scala:47:7, :62:36]
wire [9:0] _common_sExpOut_T_1 = _common_sExpOut_T[9:0]; // @[MulRecFN.scala:62:36]
wire [9:0] _common_sExpOut_T_2 = _common_sExpOut_T_1; // @[MulRecFN.scala:62:36]
wire [10:0] _common_sExpOut_T_3 = {_common_sExpOut_T_2[9], _common_sExpOut_T_2} - 11'h100; // @[MulRecFN.scala:62:{36,48}]
wire [9:0] _common_sExpOut_T_4 = _common_sExpOut_T_3[9:0]; // @[MulRecFN.scala:62:48]
assign common_sExpOut = _common_sExpOut_T_4; // @[MulRecFN.scala:62:48]
assign io_rawOut_sExp_0 = common_sExpOut; // @[MulRecFN.scala:47:7, :62:48]
wire [49:0] _common_sigOut_T = {25'h0, io_a_sig_0} * {25'h0, io_b_sig_0}; // @[MulRecFN.scala:47:7, :63:35]
assign common_sigOut = _common_sigOut_T[47:0]; // @[MulRecFN.scala:63:{35,46}]
assign io_rawOut_sig_0 = common_sigOut; // @[MulRecFN.scala:47:7, :63:46]
wire _io_invalidExc_T = io_a_sig_0[22]; // @[common.scala:82:56]
wire _io_invalidExc_T_1 = ~_io_invalidExc_T; // @[common.scala:82:{49,56}]
wire _io_invalidExc_T_2 = io_a_isNaN_0 & _io_invalidExc_T_1; // @[common.scala:82:{46,49}]
wire _io_invalidExc_T_3 = io_b_sig_0[22]; // @[common.scala:82:56]
wire _io_invalidExc_T_4 = ~_io_invalidExc_T_3; // @[common.scala:82:{49,56}]
wire _io_invalidExc_T_5 = io_b_isNaN_0 & _io_invalidExc_T_4; // @[common.scala:82:{46,49}]
wire _io_invalidExc_T_6 = _io_invalidExc_T_2 | _io_invalidExc_T_5; // @[common.scala:82:46]
assign _io_invalidExc_T_7 = _io_invalidExc_T_6 | notSigNaN_invalidExc; // @[MulRecFN.scala:58:60, :66:{45,71}]
assign io_invalidExc_0 = _io_invalidExc_T_7; // @[MulRecFN.scala:47:7, :66:71]
assign _io_rawOut_isNaN_T = io_a_isNaN_0 | io_b_isNaN_0; // @[MulRecFN.scala:47:7, :70:35]
assign io_rawOut_isNaN_0 = _io_rawOut_isNaN_T; // @[MulRecFN.scala:47:7, :70:35]
assign io_invalidExc = io_invalidExc_0; // @[MulRecFN.scala:47:7]
assign io_rawOut_isNaN = io_rawOut_isNaN_0; // @[MulRecFN.scala:47:7]
assign io_rawOut_isInf = io_rawOut_isInf_0; // @[MulRecFN.scala:47:7]
assign io_rawOut_isZero = io_rawOut_isZero_0; // @[MulRecFN.scala:47:7]
assign io_rawOut_sign = io_rawOut_sign_0; // @[MulRecFN.scala:47:7]
assign io_rawOut_sExp = io_rawOut_sExp_0; // @[MulRecFN.scala:47:7]
assign io_rawOut_sig = io_rawOut_sig_0; // @[MulRecFN.scala:47:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File util.scala:
//******************************************************************************
// Copyright (c) 2015 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Utility Functions
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v3.util
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util.{Str}
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.tile.{TileKey}
import boom.v3.common.{MicroOp}
import boom.v3.exu.{BrUpdateInfo}
/**
 * Object to XOR fold an input register of fullLength into a compressedLength.
*/
object Fold
{
def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = {
val clen = compressedLength
val hlen = fullLength
if (hlen <= clen) {
input
} else {
var res = 0.U(clen.W)
var remaining = input.asUInt
for (i <- 0 to hlen-1 by clen) {
val len = if (i + clen > hlen ) (hlen - i) else clen
require(len > 0)
res = res(clen-1,0) ^ remaining(len-1,0)
remaining = remaining >> len.U
}
res
}
}
}
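// Worked example (illustrative, not part of the original source): folding a 16-bit
// value into 4 bits XORs its four 4-bit chunks together, e.g.
//   Fold("b1010_0011_0110_0001".U, 4, 16)  // 0b0001 ^ 0b0110 ^ 0b0011 ^ 0b1010 = 0b1110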
/**
* Object to check if MicroOp was killed due to a branch mispredict.
* Uses "Fast" branch masks
*/
object IsKilledByBranch
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): Bool = {
return maskMatch(brupdate.b1.mispredict_mask, uop.br_mask)
}
def apply(brupdate: BrUpdateInfo, uop_mask: UInt): Bool = {
return maskMatch(brupdate.b1.mispredict_mask, uop_mask)
}
}
/**
* Object to return new MicroOp with a new BR mask given a MicroOp mask
* and old BR mask.
*/
object GetNewUopAndBrMask
{
def apply(uop: MicroOp, brupdate: BrUpdateInfo)
(implicit p: Parameters): MicroOp = {
val newuop = WireInit(uop)
newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask
newuop
}
}
/**
* Object to return a BR mask given a MicroOp mask and old BR mask.
*/
object GetNewBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = {
return uop.br_mask & ~brupdate.b1.resolve_mask
}
def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = {
return br_mask & ~brupdate.b1.resolve_mask
}
}
object UpdateBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = {
val out = WireInit(uop)
out.br_mask := GetNewBrMask(brupdate, uop)
out
}
def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = {
val out = WireInit(bundle)
out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask)
out
}
def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: Valid[T]): Valid[T] = {
val out = WireInit(bundle)
out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask)
out.valid := bundle.valid && !IsKilledByBranch(brupdate, bundle.bits.uop.br_mask)
out
}
}
/**
* Object to check if at least 1 bit matches in two masks
*/
object maskMatch
{
def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U
}
/**
* Object to clear one bit in a mask given an index
*/
object clearMaskBit
{
def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0)
}
/**
* Object to shift a register over by one bit and concat a new one
*/
object PerformShiftRegister
{
def apply(reg_val: UInt, new_bit: Bool): UInt = {
reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt
reg_val
}
}
/**
* Object to shift a register over by one bit, wrapping the top bit around to the bottom
* (XOR'ed with a new-bit), and evicting a bit at index HLEN.
* This is used to simulate a longer HLEN-width shift register that is folded
* down to a compressed CLEN.
*/
object PerformCircularShiftRegister
{
def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = {
val carry = csr(clen-1)
val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U)
newval
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapAdd
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, amt: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + amt)(log2Ceil(n)-1,0)
} else {
val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt)
Mux(sum >= n.U,
sum - n.U,
sum)
}
}
}
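// Example (illustrative): WrapAdd(5.U, 4.U, 8) yields 1 via the power-of-two modulo path,
// and WrapAdd(4.U, 3.U, 6) also yields 1 via the explicit subtract-on-overflow path.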
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapSub
{
// "n" is the number of increments, so we wrap to n-1.
def apply(value: UInt, amt: Int, n: Int): UInt = {
if (isPow2(n)) {
(value - amt.U)(log2Ceil(n)-1,0)
} else {
val v = Cat(0.U(1.W), value)
val b = Cat(0.U(1.W), amt.U)
Mux(value >= amt.U,
value - amt.U,
n.U - amt.U + value)
}
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapInc
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === (n-1).U)
Mux(wrap, 0.U, value + 1.U)
}
}
}
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapDec
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value - 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === 0.U)
Mux(wrap, (n-1).U, value - 1.U)
}
}
}
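// Example (illustrative): with n = 6, WrapInc(5.U, 6) wraps to 0 and
// WrapDec(0.U, 6) wraps back to 5.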
/**
* Object to mask off lower bits of a PC to align to a "b"
* Byte boundary.
*/
object AlignPCToBoundary
{
def apply(pc: UInt, b: Int): UInt = {
    // Invert so that a pc wider than the (b-1).U literal keeps its upper bits
    // (a direct AND with ~(b-1).U would clear all bits above size(b)).
~(~pc | (b-1).U)
}
}
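// Example (illustrative): AlignPCToBoundary(0x1234.U, 64) clears the low six bits,
// giving 0x1200. The double inversion is equivalent to pc & ~(b-1) but preserves
// the upper bits of pc even though the (b-1).U literal is narrower than pc.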
/**
* Object to rotate a signal left by one
*/
object RotateL1
{
def apply(signal: UInt): UInt = {
val w = signal.getWidth
val out = Cat(signal(w-2,0), signal(w-1))
return out
}
}
/**
* Object to sext a value to a particular length.
*/
object Sext
{
def apply(x: UInt, length: Int): UInt = {
if (x.getWidth == length) return x
else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x)
}
}
/**
* Object to translate from BOOM's special "packed immediate" to a 32b signed immediate
* Asking for U-type gives it shifted up 12 bits.
*/
object ImmGen
{
import boom.v3.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U}
def apply(ip: UInt, isel: UInt): SInt = {
val sign = ip(LONGEST_IMM_SZ-1).asSInt
val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign)
val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign)
val i11 = Mux(isel === IS_U, 0.S,
Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign))
val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt)
val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt)
val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S)
return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0).asSInt
}
}
/**
* Object to get the FP rounding mode out of a packed immediate.
*/
object ImmGenRm { def apply(ip: UInt): UInt = { return ip(2,0) } }
/**
 * Object to get the FP function type from a packed immediate.
* Note: only works if !(IS_B or IS_S)
*/
object ImmGenTyp { def apply(ip: UInt): UInt = { return ip(9,8) } }
/**
* Object to see if an instruction is a JALR.
*/
object DebugIsJALR
{
def apply(inst: UInt): Bool = {
// TODO Chisel not sure why this won't compile
// val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)),
// Array(
// JALR -> Bool(true)))
inst(6,0) === "b1100111".U
}
}
/**
* Object to take an instruction and output its branch or jal target. Only used
* for a debug assert (no where else would we jump straight from instruction
* bits to a target).
*/
object DebugGetBJImm
{
def apply(inst: UInt): UInt = {
// TODO Chisel not sure why this won't compile
//val csignals =
//rocket.DecodeLogic(inst,
// List(Bool(false), Bool(false)),
// Array(
// BEQ -> List(Bool(true ), Bool(false)),
// BNE -> List(Bool(true ), Bool(false)),
// BGE -> List(Bool(true ), Bool(false)),
// BGEU -> List(Bool(true ), Bool(false)),
// BLT -> List(Bool(true ), Bool(false)),
// BLTU -> List(Bool(true ), Bool(false))
// ))
//val is_br :: nothing :: Nil = csignals
val is_br = (inst(6,0) === "b1100011".U)
val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W))
val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W))
Mux(is_br, br_targ, jal_targ)
}
}
/**
* Object to return the lowest bit position after the head.
*/
object AgePriorityEncoder
{
def apply(in: Seq[Bool], head: UInt): UInt = {
val n = in.size
val width = log2Ceil(in.size)
val n_padded = 1 << width
val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in
val idx = PriorityEncoder(temp_vec)
idx(width-1, 0) //discard msb
}
}
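// Example (illustrative): with in = Seq(false.B, true.B, false.B, true.B) and
// head = 2.U, entries at or after head take priority, so the encoder returns 3;
// if nothing at or after head is set, it wraps around to a set entry before head.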
/**
* Object to determine whether queue
* index i0 is older than index i1.
*/
object IsOlder
{
def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head))
}
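// Example (illustrative): with head = 3.U, IsOlder(5.U, 1.U, 3.U) is true,
// since index 5 lies between head and the wrap point while index 1 has wrapped.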
/**
* Set all bits at or below the highest order '1'.
*/
object MaskLower
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => in >> i.U).reduce(_|_)
}
}
/**
* Set all bits at or above the lowest order '1'.
*/
object MaskUpper
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_)
}
}
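// Examples (illustrative): MaskLower("b0100".U) = "b0111" and
// MaskUpper("b0100".U) = "b1100".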
/**
* Transpose a matrix of Chisel Vecs.
*/
object Transpose
{
def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = {
val n = in(0).size
VecInit((0 until n).map(i => VecInit(in.map(row => row(i)))))
}
}
/**
* N-wide one-hot priority encoder.
*/
object SelectFirstN
{
def apply(in: UInt, n: Int) = {
val sels = Wire(Vec(n, UInt(in.getWidth.W)))
var mask = in
for (i <- 0 until n) {
sels(i) := PriorityEncoderOH(mask)
mask = mask & ~sels(i)
}
sels
}
}
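// Example (illustrative): SelectFirstN("b1011".U, 2) returns the one-hot vectors
// "b0001" and "b0010", i.e. the two lowest set bits in priority order.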
/**
* Connect the first k of n valid input interfaces to k output interfaces.
*/
class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module
{
require(n >= k)
val io = IO(new Bundle {
val in = Vec(n, Flipped(DecoupledIO(gen)))
val out = Vec(k, DecoupledIO(gen))
})
if (n == k) {
io.out <> io.in
} else {
val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c))
val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col =>
(col zip io.in.map(_.valid)) map {case (c,v) => c && v})
val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_))
val out_valids = sels map (col => col.reduce(_||_))
val out_data = sels map (s => Mux1H(s, io.in.map(_.bits)))
in_readys zip io.in foreach {case (r,i) => i.ready := r}
out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d}
}
}
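// Example (illustrative): with n = 4, k = 2 and only in(1), in(2), in(3) valid,
// out(0) is driven from in(1) and out(1) from in(2), while in(3) is held off
// (ready = false) for that cycle.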
/**
* Create a queue that can be killed with a branch kill signal.
* Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq).
*/
class BranchKillableQueue[T <: boom.v3.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v3.common.MicroOp => Bool = u => true.B, flow: Boolean = true)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v3.common.BoomModule()(p)
with boom.v3.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val enq = Flipped(Decoupled(gen))
val deq = Decoupled(gen)
val brupdate = Input(new BrUpdateInfo())
val flush = Input(Bool())
val empty = Output(Bool())
val count = Output(UInt(log2Ceil(entries).W))
})
val ram = Mem(entries, gen)
val valids = RegInit(VecInit(Seq.fill(entries) {false.B}))
val uops = Reg(Vec(entries, new MicroOp))
val enq_ptr = Counter(entries)
val deq_ptr = Counter(entries)
val maybe_full = RegInit(false.B)
val ptr_match = enq_ptr.value === deq_ptr.value
io.empty := ptr_match && !maybe_full
val full = ptr_match && maybe_full
val do_enq = WireInit(io.enq.fire)
val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty)
for (i <- 0 until entries) {
val mask = uops(i).br_mask
val uop = uops(i)
valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, mask) && !(io.flush && flush_fn(uop))
when (valids(i)) {
uops(i).br_mask := GetNewBrMask(io.brupdate, mask)
}
}
when (do_enq) {
ram(enq_ptr.value) := io.enq.bits
valids(enq_ptr.value) := true.B //!IsKilledByBranch(io.brupdate, io.enq.bits.uop)
uops(enq_ptr.value) := io.enq.bits.uop
uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
enq_ptr.inc()
}
when (do_deq) {
valids(deq_ptr.value) := false.B
deq_ptr.inc()
}
when (do_enq =/= do_deq) {
maybe_full := do_enq
}
io.enq.ready := !full
val out = Wire(gen)
out := ram(deq_ptr.value)
out.uop := uops(deq_ptr.value)
io.deq.valid := !io.empty && valids(deq_ptr.value) && !IsKilledByBranch(io.brupdate, out.uop) && !(io.flush && flush_fn(out.uop))
io.deq.bits := out
io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, out.uop)
// For flow queue behavior.
if (flow) {
when (io.empty) {
io.deq.valid := io.enq.valid //&& !IsKilledByBranch(io.brupdate, io.enq.bits.uop)
io.deq.bits := io.enq.bits
io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
do_deq := false.B
when (io.deq.ready) { do_enq := false.B }
}
}
private val ptr_diff = enq_ptr.value - deq_ptr.value
if (isPow2(entries)) {
io.count := Cat(maybe_full && ptr_match, ptr_diff)
}
else {
io.count := Mux(ptr_match,
Mux(maybe_full,
entries.asUInt, 0.U),
Mux(deq_ptr.value > enq_ptr.value,
entries.asUInt + ptr_diff, ptr_diff))
}
}
// ------------------------------------------
// Printf helper functions
// ------------------------------------------
object BoolToChar
{
/**
* Take in a Chisel Bool and convert it into a Str
* based on the Chars given
*
* @param c_bool Chisel Bool
* @param trueChar Scala Char if bool is true
* @param falseChar Scala Char if bool is false
* @return UInt ASCII Char for "trueChar" or "falseChar"
*/
def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = {
Mux(c_bool, Str(trueChar), Str(falseChar))
}
}
object CfiTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param cfi_type specific cfi type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(cfi_type: UInt) = {
val strings = Seq("----", "BR ", "JAL ", "JALR")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(cfi_type)
}
}
object BpdTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param bpd_type specific bpd type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(bpd_type: UInt) = {
val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(bpd_type)
}
}
object RobTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param rob_type specific rob type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(rob_type: UInt) = {
val strings = Seq("RST", "NML", "RBK", " WT")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(rob_type)
}
}
object XRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param xreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(xreg: UInt) = {
val strings = Seq(" x0", " ra", " sp", " gp",
" tp", " t0", " t1", " t2",
" s0", " s1", " a0", " a1",
" a2", " a3", " a4", " a5",
" a6", " a7", " s2", " s3",
" s4", " s5", " s6", " s7",
" s8", " s9", "s10", "s11",
" t3", " t4", " t5", " t6")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(xreg)
}
}
object FPRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param fpreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(fpreg: UInt) = {
val strings = Seq(" ft0", " ft1", " ft2", " ft3",
" ft4", " ft5", " ft6", " ft7",
" fs0", " fs1", " fa0", " fa1",
" fa2", " fa3", " fa4", " fa5",
" fa6", " fa7", " fs2", " fs3",
" fs4", " fs5", " fs6", " fs7",
" fs8", " fs9", "fs10", "fs11",
" ft8", " ft9", "ft10", "ft11")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(fpreg)
}
}
object BoomCoreStringPrefix
{
/**
* Add prefix to BOOM strings (currently only adds the hartId)
*
* @param strs list of strings
* @return String combining the list with the prefix per line
*/
def apply(strs: String*)(implicit p: Parameters) = {
val prefix = "[C" + s"${p(TileKey).tileId}" + "] "
strs.map(str => prefix + str + "\n").mkString("")
}
}
File tage.scala:
package boom.v3.ifu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import boom.v3.common._
import boom.v3.util.{BoomCoreStringPrefix, MaskLower, WrapInc}
import scala.math.min
class TageResp extends Bundle {
val ctr = UInt(3.W)
val u = UInt(2.W)
}
class TageTable(val nRows: Int, val tagSz: Int, val histLength: Int, val uBitPeriod: Int)
(implicit p: Parameters) extends BoomModule()(p)
with HasBoomFrontendParameters
{
require(histLength <= globalHistoryLength)
val nWrBypassEntries = 2
val io = IO( new Bundle {
val f1_req_valid = Input(Bool())
val f1_req_pc = Input(UInt(vaddrBitsExtended.W))
val f1_req_ghist = Input(UInt(globalHistoryLength.W))
val f3_resp = Output(Vec(bankWidth, Valid(new TageResp)))
val update_mask = Input(Vec(bankWidth, Bool()))
val update_taken = Input(Vec(bankWidth, Bool()))
val update_alloc = Input(Vec(bankWidth, Bool()))
val update_old_ctr = Input(Vec(bankWidth, UInt(3.W)))
val update_pc = Input(UInt())
val update_hist = Input(UInt())
val update_u_mask = Input(Vec(bankWidth, Bool()))
val update_u = Input(Vec(bankWidth, UInt(2.W)))
})
def compute_folded_hist(hist: UInt, l: Int) = {
val nChunks = (histLength + l - 1) / l
val hist_chunks = (0 until nChunks) map {i =>
hist(min((i+1)*l, histLength)-1, i*l)
}
hist_chunks.reduce(_^_)
}
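  // Example (illustrative): with histLength = 8 and l = 3 the history splits into
  // chunks hist(2,0), hist(5,3), hist(7,6), which are XORed together;
  // e.g. 0b10_110_011 folds to 0b111.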
def compute_tag_and_hash(unhashed_idx: UInt, hist: UInt) = {
val idx_history = compute_folded_hist(hist, log2Ceil(nRows))
val idx = (unhashed_idx ^ idx_history)(log2Ceil(nRows)-1,0)
val tag_history = compute_folded_hist(hist, tagSz)
val tag = ((unhashed_idx >> log2Ceil(nRows)) ^ tag_history)(tagSz-1,0)
(idx, tag)
}
def inc_ctr(ctr: UInt, taken: Bool): UInt = {
Mux(!taken, Mux(ctr === 0.U, 0.U, ctr - 1.U),
Mux(ctr === 7.U, 7.U, ctr + 1.U))
}
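  // Example (illustrative): the 3-bit counter saturates at both ends, so
  // inc_ctr(7.U, true.B) stays 7 and inc_ctr(0.U, false.B) stays 0, while
  // inc_ctr(3.U, true.B) moves to 4.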
val doing_reset = RegInit(true.B)
val reset_idx = RegInit(0.U(log2Ceil(nRows).W))
reset_idx := reset_idx + doing_reset
when (reset_idx === (nRows-1).U) { doing_reset := false.B }
class TageEntry extends Bundle {
val valid = Bool() // TODO: Remove this valid bit
val tag = UInt(tagSz.W)
val ctr = UInt(3.W)
}
val tageEntrySz = 1 + tagSz + 3
val (s1_hashed_idx, s1_tag) = compute_tag_and_hash(fetchIdx(io.f1_req_pc), io.f1_req_ghist)
val hi_us = SyncReadMem(nRows, Vec(bankWidth, Bool()))
val lo_us = SyncReadMem(nRows, Vec(bankWidth, Bool()))
val table = SyncReadMem(nRows, Vec(bankWidth, UInt(tageEntrySz.W)))
val mems = Seq((f"tage_l$histLength", nRows, bankWidth * tageEntrySz))
val s2_tag = RegNext(s1_tag)
val s2_req_rtage = VecInit(table.read(s1_hashed_idx, io.f1_req_valid).map(_.asTypeOf(new TageEntry)))
val s2_req_rhius = hi_us.read(s1_hashed_idx, io.f1_req_valid)
val s2_req_rlous = lo_us.read(s1_hashed_idx, io.f1_req_valid)
val s2_req_rhits = VecInit(s2_req_rtage.map(e => e.valid && e.tag === s2_tag && !doing_reset))
for (w <- 0 until bankWidth) {
// This bit indicates the TAGE table matched here
io.f3_resp(w).valid := RegNext(s2_req_rhits(w))
io.f3_resp(w).bits.u := RegNext(Cat(s2_req_rhius(w), s2_req_rlous(w)))
io.f3_resp(w).bits.ctr := RegNext(s2_req_rtage(w).ctr)
}
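  // Descriptive note: the clear_u counter below sweeps the table, clearing one
  // row's usefulness bits every uBitPeriod cycles: the low u bits during the
  // first half of the sweep and the high u bits during the second half.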
val clear_u_ctr = RegInit(0.U((log2Ceil(uBitPeriod) + log2Ceil(nRows) + 1).W))
when (doing_reset) { clear_u_ctr := 1.U } .otherwise { clear_u_ctr := clear_u_ctr + 1.U }
val doing_clear_u = clear_u_ctr(log2Ceil(uBitPeriod)-1,0) === 0.U
val doing_clear_u_hi = doing_clear_u && clear_u_ctr(log2Ceil(uBitPeriod) + log2Ceil(nRows)) === 1.U
val doing_clear_u_lo = doing_clear_u && clear_u_ctr(log2Ceil(uBitPeriod) + log2Ceil(nRows)) === 0.U
val clear_u_idx = clear_u_ctr >> log2Ceil(uBitPeriod)
val (update_idx, update_tag) = compute_tag_and_hash(fetchIdx(io.update_pc), io.update_hist)
val update_wdata = Wire(Vec(bankWidth, new TageEntry))
table.write(
Mux(doing_reset, reset_idx , update_idx),
Mux(doing_reset, VecInit(Seq.fill(bankWidth) { 0.U(tageEntrySz.W) }), VecInit(update_wdata.map(_.asUInt))),
Mux(doing_reset, ~(0.U(bankWidth.W)) , io.update_mask.asUInt).asBools
)
val update_hi_wdata = Wire(Vec(bankWidth, Bool()))
hi_us.write(
Mux(doing_reset, reset_idx, Mux(doing_clear_u_hi, clear_u_idx, update_idx)),
Mux(doing_reset || doing_clear_u_hi, VecInit((0.U(bankWidth.W)).asBools), update_hi_wdata),
Mux(doing_reset || doing_clear_u_hi, ~(0.U(bankWidth.W)), io.update_u_mask.asUInt).asBools
)
val update_lo_wdata = Wire(Vec(bankWidth, Bool()))
lo_us.write(
Mux(doing_reset, reset_idx, Mux(doing_clear_u_lo, clear_u_idx, update_idx)),
Mux(doing_reset || doing_clear_u_lo, VecInit((0.U(bankWidth.W)).asBools), update_lo_wdata),
Mux(doing_reset || doing_clear_u_lo, ~(0.U(bankWidth.W)), io.update_u_mask.asUInt).asBools
)
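  // Descriptive note: a small write bypass remembers the counters most recently
  // written for a given (index, tag), so back-to-back updates to the same entry
  // increment the freshest value rather than the stale counter carried with the update.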
val wrbypass_tags = Reg(Vec(nWrBypassEntries, UInt(tagSz.W)))
val wrbypass_idxs = Reg(Vec(nWrBypassEntries, UInt(log2Ceil(nRows).W)))
val wrbypass = Reg(Vec(nWrBypassEntries, Vec(bankWidth, UInt(3.W))))
val wrbypass_enq_idx = RegInit(0.U(log2Ceil(nWrBypassEntries).W))
val wrbypass_hits = VecInit((0 until nWrBypassEntries) map { i =>
!doing_reset &&
wrbypass_tags(i) === update_tag &&
wrbypass_idxs(i) === update_idx
})
val wrbypass_hit = wrbypass_hits.reduce(_||_)
val wrbypass_hit_idx = PriorityEncoder(wrbypass_hits)
for (w <- 0 until bankWidth) {
update_wdata(w).ctr := Mux(io.update_alloc(w),
Mux(io.update_taken(w), 4.U,
3.U
),
Mux(wrbypass_hit, inc_ctr(wrbypass(wrbypass_hit_idx)(w), io.update_taken(w)),
inc_ctr(io.update_old_ctr(w), io.update_taken(w))
)
)
update_wdata(w).valid := true.B
update_wdata(w).tag := update_tag
update_hi_wdata(w) := io.update_u(w)(1)
update_lo_wdata(w) := io.update_u(w)(0)
}
when (io.update_mask.reduce(_||_)) {
when (wrbypass_hits.reduce(_||_)) {
wrbypass(wrbypass_hit_idx) := VecInit(update_wdata.map(_.ctr))
} .otherwise {
wrbypass (wrbypass_enq_idx) := VecInit(update_wdata.map(_.ctr))
wrbypass_tags(wrbypass_enq_idx) := update_tag
wrbypass_idxs(wrbypass_enq_idx) := update_idx
wrbypass_enq_idx := WrapInc(wrbypass_enq_idx, nWrBypassEntries)
}
}
}
case class BoomTageParams(
// nSets, histLen, tagSz
tableInfo: Seq[Tuple3[Int, Int, Int]] = Seq(( 128, 2, 7),
( 128, 4, 7),
( 256, 8, 8),
( 256, 16, 8),
( 128, 32, 9),
( 128, 64, 9)),
uBitPeriod: Int = 2048
)
class TageBranchPredictorBank(params: BoomTageParams = BoomTageParams())(implicit p: Parameters) extends BranchPredictorBank()(p)
{
val tageUBitPeriod = params.uBitPeriod
val tageNTables = params.tableInfo.size
class TageMeta extends Bundle
{
val provider = Vec(bankWidth, Valid(UInt(log2Ceil(tageNTables).W)))
val alt_differs = Vec(bankWidth, Output(Bool()))
val provider_u = Vec(bankWidth, Output(UInt(2.W)))
val provider_ctr = Vec(bankWidth, Output(UInt(3.W)))
val allocate = Vec(bankWidth, Valid(UInt(log2Ceil(tageNTables).W)))
}
val f3_meta = Wire(new TageMeta)
override val metaSz = f3_meta.asUInt.getWidth
require(metaSz <= bpdMaxMetaLength)
def inc_u(u: UInt, alt_differs: Bool, mispredict: Bool): UInt = {
Mux(!alt_differs, u,
Mux(mispredict, Mux(u === 0.U, 0.U, u - 1.U),
Mux(u === 3.U, 3.U, u + 1.U)))
}
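  // Example (illustrative): the usefulness counter only moves when the provider
  // and alternate predictions differ; inc_u(2.U, true.B, true.B) decrements to 1,
  // while inc_u(3.U, true.B, false.B) saturates at 3.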
val tt = params.tableInfo map {
case (n, l, s) => {
val t = Module(new TageTable(n, s, l, params.uBitPeriod))
t.io.f1_req_valid := RegNext(io.f0_valid)
t.io.f1_req_pc := RegNext(io.f0_pc)
t.io.f1_req_ghist := io.f1_ghist
(t, t.mems)
}
}
val tables = tt.map(_._1)
val mems = tt.map(_._2).flatten
val f3_resps = VecInit(tables.map(_.io.f3_resp))
val s1_update_meta = s1_update.bits.meta.asTypeOf(new TageMeta)
val s1_update_mispredict_mask = UIntToOH(s1_update.bits.cfi_idx.bits) &
Fill(bankWidth, s1_update.bits.cfi_mispredicted)
val s1_update_mask = WireInit((0.U).asTypeOf(Vec(tageNTables, Vec(bankWidth, Bool()))))
val s1_update_u_mask = WireInit((0.U).asTypeOf(Vec(tageNTables, Vec(bankWidth, UInt(1.W)))))
val s1_update_taken = Wire(Vec(tageNTables, Vec(bankWidth, Bool())))
val s1_update_old_ctr = Wire(Vec(tageNTables, Vec(bankWidth, UInt(3.W))))
val s1_update_alloc = Wire(Vec(tageNTables, Vec(bankWidth, Bool())))
val s1_update_u = Wire(Vec(tageNTables, Vec(bankWidth, UInt(2.W))))
s1_update_taken := DontCare
s1_update_old_ctr := DontCare
s1_update_alloc := DontCare
s1_update_u := DontCare
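  // Descriptive note: the loop below walks the tables from shortest to longest
  // history; the longest-history hit becomes the provider, the next-longest hit
  // (or the base prediction) becomes the alternate, and a weak provider counter
  // (3 or 4) defers to the alternate.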
for (w <- 0 until bankWidth) {
var altpred = io.resp_in(0).f3(w).taken
val final_altpred = WireInit(io.resp_in(0).f3(w).taken)
var provided = false.B
var provider = 0.U
io.resp.f3(w).taken := io.resp_in(0).f3(w).taken
for (i <- 0 until tageNTables) {
val hit = f3_resps(i)(w).valid
val ctr = f3_resps(i)(w).bits.ctr
when (hit) {
io.resp.f3(w).taken := Mux(ctr === 3.U || ctr === 4.U, altpred, ctr(2))
final_altpred := altpred
}
provided = provided || hit
provider = Mux(hit, i.U, provider)
altpred = Mux(hit, f3_resps(i)(w).bits.ctr(2), altpred)
}
f3_meta.provider(w).valid := provided
f3_meta.provider(w).bits := provider
f3_meta.alt_differs(w) := final_altpred =/= io.resp.f3(w).taken
f3_meta.provider_u(w) := f3_resps(provider)(w).bits.u
f3_meta.provider_ctr(w) := f3_resps(provider)(w).bits.ctr
    // Create a mask of tables which did not hit our query, contain useless entries,
    // and use a longer history than the provider
val allocatable_slots = (
VecInit(f3_resps.map(r => !r(w).valid && r(w).bits.u === 0.U)).asUInt &
~(MaskLower(UIntToOH(provider)) & Fill(tageNTables, provided))
)
val alloc_lfsr = random.LFSR(tageNTables max 2)
val first_entry = PriorityEncoder(allocatable_slots)
val masked_entry = PriorityEncoder(allocatable_slots & alloc_lfsr)
val alloc_entry = Mux(allocatable_slots(masked_entry),
masked_entry,
first_entry)
f3_meta.allocate(w).valid := allocatable_slots =/= 0.U
f3_meta.allocate(w).bits := alloc_entry
val update_was_taken = (s1_update.bits.cfi_idx.valid &&
(s1_update.bits.cfi_idx.bits === w.U) &&
s1_update.bits.cfi_taken)
when (s1_update.bits.br_mask(w) && s1_update.valid && s1_update.bits.is_commit_update) {
when (s1_update_meta.provider(w).valid) {
val provider = s1_update_meta.provider(w).bits
s1_update_mask(provider)(w) := true.B
s1_update_u_mask(provider)(w) := true.B
val new_u = inc_u(s1_update_meta.provider_u(w),
s1_update_meta.alt_differs(w),
s1_update_mispredict_mask(w))
s1_update_u (provider)(w) := new_u
s1_update_taken (provider)(w) := update_was_taken
s1_update_old_ctr(provider)(w) := s1_update_meta.provider_ctr(w)
s1_update_alloc (provider)(w) := false.B
}
}
}
when (s1_update.valid && s1_update.bits.is_commit_update && s1_update.bits.cfi_mispredicted && s1_update.bits.cfi_idx.valid) {
val idx = s1_update.bits.cfi_idx.bits
val allocate = s1_update_meta.allocate(idx)
when (allocate.valid) {
s1_update_mask (allocate.bits)(idx) := true.B
s1_update_taken(allocate.bits)(idx) := s1_update.bits.cfi_taken
s1_update_alloc(allocate.bits)(idx) := true.B
s1_update_u_mask(allocate.bits)(idx) := true.B
s1_update_u (allocate.bits)(idx) := 0.U
} .otherwise {
val provider = s1_update_meta.provider(idx)
val decr_mask = Mux(provider.valid, ~MaskLower(UIntToOH(provider.bits)), 0.U)
for (i <- 0 until tageNTables) {
when (decr_mask(i)) {
s1_update_u_mask(i)(idx) := true.B
s1_update_u (i)(idx) := 0.U
}
}
}
}
for (i <- 0 until tageNTables) {
for (w <- 0 until bankWidth) {
tables(i).io.update_mask(w) := RegNext(s1_update_mask(i)(w))
tables(i).io.update_taken(w) := RegNext(s1_update_taken(i)(w))
tables(i).io.update_alloc(w) := RegNext(s1_update_alloc(i)(w))
tables(i).io.update_old_ctr(w) := RegNext(s1_update_old_ctr(i)(w))
tables(i).io.update_u_mask(w) := RegNext(s1_update_u_mask(i)(w))
tables(i).io.update_u(w) := RegNext(s1_update_u(i)(w))
}
tables(i).io.update_pc := RegNext(s1_update.bits.pc)
tables(i).io.update_hist := RegNext(s1_update.bits.ghist)
}
//io.f3_meta := Cat(f3_meta.asUInt, micro.io.f3_meta(micro.metaSz-1,0), base.io.f3_meta(base.metaSz-1, 0))
io.f3_meta := f3_meta.asUInt
}
| module TageTable_2( // @[tage.scala:24:7]
input clock, // @[tage.scala:24:7]
input reset, // @[tage.scala:24:7]
input io_f1_req_valid, // @[tage.scala:31:14]
input [39:0] io_f1_req_pc, // @[tage.scala:31:14]
input [63:0] io_f1_req_ghist, // @[tage.scala:31:14]
output io_f3_resp_0_valid, // @[tage.scala:31:14]
output [2:0] io_f3_resp_0_bits_ctr, // @[tage.scala:31:14]
output [1:0] io_f3_resp_0_bits_u, // @[tage.scala:31:14]
output io_f3_resp_1_valid, // @[tage.scala:31:14]
output [2:0] io_f3_resp_1_bits_ctr, // @[tage.scala:31:14]
output [1:0] io_f3_resp_1_bits_u, // @[tage.scala:31:14]
output io_f3_resp_2_valid, // @[tage.scala:31:14]
output [2:0] io_f3_resp_2_bits_ctr, // @[tage.scala:31:14]
output [1:0] io_f3_resp_2_bits_u, // @[tage.scala:31:14]
output io_f3_resp_3_valid, // @[tage.scala:31:14]
output [2:0] io_f3_resp_3_bits_ctr, // @[tage.scala:31:14]
output [1:0] io_f3_resp_3_bits_u, // @[tage.scala:31:14]
input io_update_mask_0, // @[tage.scala:31:14]
input io_update_mask_1, // @[tage.scala:31:14]
input io_update_mask_2, // @[tage.scala:31:14]
input io_update_mask_3, // @[tage.scala:31:14]
input io_update_taken_0, // @[tage.scala:31:14]
input io_update_taken_1, // @[tage.scala:31:14]
input io_update_taken_2, // @[tage.scala:31:14]
input io_update_taken_3, // @[tage.scala:31:14]
input io_update_alloc_0, // @[tage.scala:31:14]
input io_update_alloc_1, // @[tage.scala:31:14]
input io_update_alloc_2, // @[tage.scala:31:14]
input io_update_alloc_3, // @[tage.scala:31:14]
input [2:0] io_update_old_ctr_0, // @[tage.scala:31:14]
input [2:0] io_update_old_ctr_1, // @[tage.scala:31:14]
input [2:0] io_update_old_ctr_2, // @[tage.scala:31:14]
input [2:0] io_update_old_ctr_3, // @[tage.scala:31:14]
input [39:0] io_update_pc, // @[tage.scala:31:14]
input [63:0] io_update_hist, // @[tage.scala:31:14]
input io_update_u_mask_0, // @[tage.scala:31:14]
input io_update_u_mask_1, // @[tage.scala:31:14]
input io_update_u_mask_2, // @[tage.scala:31:14]
input io_update_u_mask_3, // @[tage.scala:31:14]
input [1:0] io_update_u_0, // @[tage.scala:31:14]
input [1:0] io_update_u_1, // @[tage.scala:31:14]
input [1:0] io_update_u_2, // @[tage.scala:31:14]
input [1:0] io_update_u_3 // @[tage.scala:31:14]
);
wire lo_us_MPORT_2_data_3; // @[tage.scala:137:8]
wire lo_us_MPORT_2_data_2; // @[tage.scala:137:8]
wire lo_us_MPORT_2_data_1; // @[tage.scala:137:8]
wire lo_us_MPORT_2_data_0; // @[tage.scala:137:8]
wire hi_us_MPORT_1_data_3; // @[tage.scala:130:8]
wire hi_us_MPORT_1_data_2; // @[tage.scala:130:8]
wire hi_us_MPORT_1_data_1; // @[tage.scala:130:8]
wire hi_us_MPORT_1_data_0; // @[tage.scala:130:8]
wire [11:0] table_MPORT_data_3; // @[tage.scala:123:8]
wire [11:0] table_MPORT_data_2; // @[tage.scala:123:8]
wire [11:0] table_MPORT_data_1; // @[tage.scala:123:8]
wire [11:0] table_MPORT_data_0; // @[tage.scala:123:8]
wire _s2_req_rtage_WIRE_7_valid; // @[tage.scala:97:87]
wire [7:0] _s2_req_rtage_WIRE_7_tag; // @[tage.scala:97:87]
wire [2:0] _s2_req_rtage_WIRE_7_ctr; // @[tage.scala:97:87]
wire _s2_req_rtage_WIRE_5_valid; // @[tage.scala:97:87]
wire [7:0] _s2_req_rtage_WIRE_5_tag; // @[tage.scala:97:87]
wire [2:0] _s2_req_rtage_WIRE_5_ctr; // @[tage.scala:97:87]
wire _s2_req_rtage_WIRE_3_valid; // @[tage.scala:97:87]
wire [7:0] _s2_req_rtage_WIRE_3_tag; // @[tage.scala:97:87]
wire [2:0] _s2_req_rtage_WIRE_3_ctr; // @[tage.scala:97:87]
wire _s2_req_rtage_WIRE_1_valid; // @[tage.scala:97:87]
wire [7:0] _s2_req_rtage_WIRE_1_tag; // @[tage.scala:97:87]
wire [2:0] _s2_req_rtage_WIRE_1_ctr; // @[tage.scala:97:87]
wire [47:0] _table_R0_data; // @[tage.scala:91:27]
wire [3:0] _lo_us_R0_data; // @[tage.scala:90:27]
wire [3:0] _hi_us_R0_data; // @[tage.scala:89:27]
wire io_f1_req_valid_0 = io_f1_req_valid; // @[tage.scala:24:7]
wire [39:0] io_f1_req_pc_0 = io_f1_req_pc; // @[tage.scala:24:7]
wire [63:0] io_f1_req_ghist_0 = io_f1_req_ghist; // @[tage.scala:24:7]
wire io_update_mask_0_0 = io_update_mask_0; // @[tage.scala:24:7]
wire io_update_mask_1_0 = io_update_mask_1; // @[tage.scala:24:7]
wire io_update_mask_2_0 = io_update_mask_2; // @[tage.scala:24:7]
wire io_update_mask_3_0 = io_update_mask_3; // @[tage.scala:24:7]
wire io_update_taken_0_0 = io_update_taken_0; // @[tage.scala:24:7]
wire io_update_taken_1_0 = io_update_taken_1; // @[tage.scala:24:7]
wire io_update_taken_2_0 = io_update_taken_2; // @[tage.scala:24:7]
wire io_update_taken_3_0 = io_update_taken_3; // @[tage.scala:24:7]
wire io_update_alloc_0_0 = io_update_alloc_0; // @[tage.scala:24:7]
wire io_update_alloc_1_0 = io_update_alloc_1; // @[tage.scala:24:7]
wire io_update_alloc_2_0 = io_update_alloc_2; // @[tage.scala:24:7]
wire io_update_alloc_3_0 = io_update_alloc_3; // @[tage.scala:24:7]
wire [2:0] io_update_old_ctr_0_0 = io_update_old_ctr_0; // @[tage.scala:24:7]
wire [2:0] io_update_old_ctr_1_0 = io_update_old_ctr_1; // @[tage.scala:24:7]
wire [2:0] io_update_old_ctr_2_0 = io_update_old_ctr_2; // @[tage.scala:24:7]
wire [2:0] io_update_old_ctr_3_0 = io_update_old_ctr_3; // @[tage.scala:24:7]
wire [39:0] io_update_pc_0 = io_update_pc; // @[tage.scala:24:7]
wire [63:0] io_update_hist_0 = io_update_hist; // @[tage.scala:24:7]
wire io_update_u_mask_0_0 = io_update_u_mask_0; // @[tage.scala:24:7]
wire io_update_u_mask_1_0 = io_update_u_mask_1; // @[tage.scala:24:7]
wire io_update_u_mask_2_0 = io_update_u_mask_2; // @[tage.scala:24:7]
wire io_update_u_mask_3_0 = io_update_u_mask_3; // @[tage.scala:24:7]
wire [1:0] io_update_u_0_0 = io_update_u_0; // @[tage.scala:24:7]
wire [1:0] io_update_u_1_0 = io_update_u_1; // @[tage.scala:24:7]
wire [1:0] io_update_u_2_0 = io_update_u_2; // @[tage.scala:24:7]
wire [1:0] io_update_u_3_0 = io_update_u_3; // @[tage.scala:24:7]
wire update_wdata_0_valid = 1'h1; // @[tage.scala:119:26]
wire update_wdata_1_valid = 1'h1; // @[tage.scala:119:26]
wire update_wdata_2_valid = 1'h1; // @[tage.scala:119:26]
wire update_wdata_3_valid = 1'h1; // @[tage.scala:119:26]
wire [2:0] io_f3_resp_0_bits_ctr_0; // @[tage.scala:24:7]
wire [1:0] io_f3_resp_0_bits_u_0; // @[tage.scala:24:7]
wire io_f3_resp_0_valid_0; // @[tage.scala:24:7]
wire [2:0] io_f3_resp_1_bits_ctr_0; // @[tage.scala:24:7]
wire [1:0] io_f3_resp_1_bits_u_0; // @[tage.scala:24:7]
wire io_f3_resp_1_valid_0; // @[tage.scala:24:7]
wire [2:0] io_f3_resp_2_bits_ctr_0; // @[tage.scala:24:7]
wire [1:0] io_f3_resp_2_bits_u_0; // @[tage.scala:24:7]
wire io_f3_resp_2_valid_0; // @[tage.scala:24:7]
wire [2:0] io_f3_resp_3_bits_ctr_0; // @[tage.scala:24:7]
wire [1:0] io_f3_resp_3_bits_u_0; // @[tage.scala:24:7]
wire io_f3_resp_3_valid_0; // @[tage.scala:24:7]
reg doing_reset; // @[tage.scala:72:28]
reg [7:0] reset_idx; // @[tage.scala:73:26]
wire [8:0] _reset_idx_T = {1'h0, reset_idx} + {8'h0, doing_reset}; // @[tage.scala:72:28, :73:26, :74:26]
wire [7:0] _reset_idx_T_1 = _reset_idx_T[7:0]; // @[tage.scala:74:26]
wire [7:0] idx_history = io_f1_req_ghist_0[7:0]; // @[tage.scala:24:7, :53:11]
wire [7:0] tag_history = io_f1_req_ghist_0[7:0]; // @[tage.scala:24:7, :53:11]
wire [28:0] _tag_T = io_f1_req_pc_0[39:11]; // @[frontend.scala:162:35]
wire [36:0] _idx_T = {_tag_T, io_f1_req_pc_0[10:3] ^ idx_history}; // @[frontend.scala:162:35]
wire [7:0] s1_hashed_idx = _idx_T[7:0]; // @[tage.scala:60:{29,43}]
wire [7:0] _s2_req_rtage_WIRE = s1_hashed_idx; // @[tage.scala:60:43, :97:40]
wire [7:0] _s2_req_rhius_WIRE = s1_hashed_idx; // @[tage.scala:60:43, :98:32]
wire [7:0] _s2_req_rlous_WIRE = s1_hashed_idx; // @[tage.scala:60:43, :99:32]
wire [28:0] _tag_T_1 = {_tag_T[28:8], _tag_T[7:0] ^ tag_history}; // @[tage.scala:53:11, :62:{30,50}]
wire [7:0] s1_tag = _tag_T_1[7:0]; // @[tage.scala:62:{50,64}]
wire [11:0] _s2_req_rtage_WIRE_2 = _table_R0_data[11:0]; // @[tage.scala:91:27, :97:87]
wire [11:0] _s2_req_rtage_WIRE_4 = _table_R0_data[23:12]; // @[tage.scala:91:27, :97:87]
wire [11:0] _s2_req_rtage_WIRE_6 = _table_R0_data[35:24]; // @[tage.scala:91:27, :97:87]
wire [11:0] _s2_req_rtage_WIRE_8 = _table_R0_data[47:36]; // @[tage.scala:91:27, :97:87]
reg [7:0] s2_tag; // @[tage.scala:95:29]
wire _s2_req_rtage_T_2; // @[tage.scala:97:87]
wire [7:0] _s2_req_rtage_T_1; // @[tage.scala:97:87]
wire s2_req_rtage_0_valid = _s2_req_rtage_WIRE_1_valid; // @[tage.scala:97:{29,87}]
wire [2:0] _s2_req_rtage_T; // @[tage.scala:97:87]
wire [7:0] s2_req_rtage_0_tag = _s2_req_rtage_WIRE_1_tag; // @[tage.scala:97:{29,87}]
wire [2:0] s2_req_rtage_0_ctr = _s2_req_rtage_WIRE_1_ctr; // @[tage.scala:97:{29,87}]
assign _s2_req_rtage_T = _s2_req_rtage_WIRE_2[2:0]; // @[tage.scala:97:87]
assign _s2_req_rtage_WIRE_1_ctr = _s2_req_rtage_T; // @[tage.scala:97:87]
assign _s2_req_rtage_T_1 = _s2_req_rtage_WIRE_2[10:3]; // @[tage.scala:97:87]
assign _s2_req_rtage_WIRE_1_tag = _s2_req_rtage_T_1; // @[tage.scala:97:87]
assign _s2_req_rtage_T_2 = _s2_req_rtage_WIRE_2[11]; // @[tage.scala:97:87]
assign _s2_req_rtage_WIRE_1_valid = _s2_req_rtage_T_2; // @[tage.scala:97:87]
wire _s2_req_rtage_T_5; // @[tage.scala:97:87]
wire [7:0] _s2_req_rtage_T_4; // @[tage.scala:97:87]
wire s2_req_rtage_1_valid = _s2_req_rtage_WIRE_3_valid; // @[tage.scala:97:{29,87}]
wire [2:0] _s2_req_rtage_T_3; // @[tage.scala:97:87]
wire [7:0] s2_req_rtage_1_tag = _s2_req_rtage_WIRE_3_tag; // @[tage.scala:97:{29,87}]
wire [2:0] s2_req_rtage_1_ctr = _s2_req_rtage_WIRE_3_ctr; // @[tage.scala:97:{29,87}]
assign _s2_req_rtage_T_3 = _s2_req_rtage_WIRE_4[2:0]; // @[tage.scala:97:87]
assign _s2_req_rtage_WIRE_3_ctr = _s2_req_rtage_T_3; // @[tage.scala:97:87]
assign _s2_req_rtage_T_4 = _s2_req_rtage_WIRE_4[10:3]; // @[tage.scala:97:87]
assign _s2_req_rtage_WIRE_3_tag = _s2_req_rtage_T_4; // @[tage.scala:97:87]
assign _s2_req_rtage_T_5 = _s2_req_rtage_WIRE_4[11]; // @[tage.scala:97:87]
assign _s2_req_rtage_WIRE_3_valid = _s2_req_rtage_T_5; // @[tage.scala:97:87]
wire _s2_req_rtage_T_8; // @[tage.scala:97:87]
wire [7:0] _s2_req_rtage_T_7; // @[tage.scala:97:87]
wire s2_req_rtage_2_valid = _s2_req_rtage_WIRE_5_valid; // @[tage.scala:97:{29,87}]
wire [2:0] _s2_req_rtage_T_6; // @[tage.scala:97:87]
wire [7:0] s2_req_rtage_2_tag = _s2_req_rtage_WIRE_5_tag; // @[tage.scala:97:{29,87}]
wire [2:0] s2_req_rtage_2_ctr = _s2_req_rtage_WIRE_5_ctr; // @[tage.scala:97:{29,87}]
assign _s2_req_rtage_T_6 = _s2_req_rtage_WIRE_6[2:0]; // @[tage.scala:97:87]
assign _s2_req_rtage_WIRE_5_ctr = _s2_req_rtage_T_6; // @[tage.scala:97:87]
assign _s2_req_rtage_T_7 = _s2_req_rtage_WIRE_6[10:3]; // @[tage.scala:97:87]
assign _s2_req_rtage_WIRE_5_tag = _s2_req_rtage_T_7; // @[tage.scala:97:87]
assign _s2_req_rtage_T_8 = _s2_req_rtage_WIRE_6[11]; // @[tage.scala:97:87]
assign _s2_req_rtage_WIRE_5_valid = _s2_req_rtage_T_8; // @[tage.scala:97:87]
wire _s2_req_rtage_T_11; // @[tage.scala:97:87]
wire [7:0] _s2_req_rtage_T_10; // @[tage.scala:97:87]
wire s2_req_rtage_3_valid = _s2_req_rtage_WIRE_7_valid; // @[tage.scala:97:{29,87}]
wire [2:0] _s2_req_rtage_T_9; // @[tage.scala:97:87]
wire [7:0] s2_req_rtage_3_tag = _s2_req_rtage_WIRE_7_tag; // @[tage.scala:97:{29,87}]
wire [2:0] s2_req_rtage_3_ctr = _s2_req_rtage_WIRE_7_ctr; // @[tage.scala:97:{29,87}]
assign _s2_req_rtage_T_9 = _s2_req_rtage_WIRE_8[2:0]; // @[tage.scala:97:87]
assign _s2_req_rtage_WIRE_7_ctr = _s2_req_rtage_T_9; // @[tage.scala:97:87]
assign _s2_req_rtage_T_10 = _s2_req_rtage_WIRE_8[10:3]; // @[tage.scala:97:87]
assign _s2_req_rtage_WIRE_7_tag = _s2_req_rtage_T_10; // @[tage.scala:97:87]
assign _s2_req_rtage_T_11 = _s2_req_rtage_WIRE_8[11]; // @[tage.scala:97:87]
assign _s2_req_rtage_WIRE_7_valid = _s2_req_rtage_T_11; // @[tage.scala:97:87]
wire _s2_req_rhits_T = s2_req_rtage_0_tag == s2_tag; // @[tage.scala:95:29, :97:29, :100:69]
wire _s2_req_rhits_T_1 = s2_req_rtage_0_valid & _s2_req_rhits_T; // @[tage.scala:97:29, :100:{60,69}]
wire _s2_req_rhits_T_2 = ~doing_reset; // @[tage.scala:72:28, :100:83]
wire _s2_req_rhits_T_3 = _s2_req_rhits_T_1 & _s2_req_rhits_T_2; // @[tage.scala:100:{60,80,83}]
wire s2_req_rhits_0 = _s2_req_rhits_T_3; // @[tage.scala:100:{29,80}]
wire _s2_req_rhits_T_4 = s2_req_rtage_1_tag == s2_tag; // @[tage.scala:95:29, :97:29, :100:69]
wire _s2_req_rhits_T_5 = s2_req_rtage_1_valid & _s2_req_rhits_T_4; // @[tage.scala:97:29, :100:{60,69}]
wire _s2_req_rhits_T_6 = ~doing_reset; // @[tage.scala:72:28, :100:83]
wire _s2_req_rhits_T_7 = _s2_req_rhits_T_5 & _s2_req_rhits_T_6; // @[tage.scala:100:{60,80,83}]
wire s2_req_rhits_1 = _s2_req_rhits_T_7; // @[tage.scala:100:{29,80}]
wire _s2_req_rhits_T_8 = s2_req_rtage_2_tag == s2_tag; // @[tage.scala:95:29, :97:29, :100:69]
wire _s2_req_rhits_T_9 = s2_req_rtage_2_valid & _s2_req_rhits_T_8; // @[tage.scala:97:29, :100:{60,69}]
wire _s2_req_rhits_T_10 = ~doing_reset; // @[tage.scala:72:28, :100:83]
wire _s2_req_rhits_T_11 = _s2_req_rhits_T_9 & _s2_req_rhits_T_10; // @[tage.scala:100:{60,80,83}]
wire s2_req_rhits_2 = _s2_req_rhits_T_11; // @[tage.scala:100:{29,80}]
wire _s2_req_rhits_T_12 = s2_req_rtage_3_tag == s2_tag; // @[tage.scala:95:29, :97:29, :100:69]
wire _s2_req_rhits_T_13 = s2_req_rtage_3_valid & _s2_req_rhits_T_12; // @[tage.scala:97:29, :100:{60,69}]
wire _s2_req_rhits_T_14 = ~doing_reset; // @[tage.scala:72:28, :100:83]
wire _s2_req_rhits_T_15 = _s2_req_rhits_T_13 & _s2_req_rhits_T_14; // @[tage.scala:100:{60,80,83}]
wire s2_req_rhits_3 = _s2_req_rhits_T_15; // @[tage.scala:100:{29,80}]
reg io_f3_resp_0_valid_REG; // @[tage.scala:104:38]
assign io_f3_resp_0_valid_0 = io_f3_resp_0_valid_REG; // @[tage.scala:24:7, :104:38]
wire [1:0] _io_f3_resp_0_bits_u_T = {_hi_us_R0_data[0], _lo_us_R0_data[0]}; // @[tage.scala:89:27, :90:27, :105:42]
reg [1:0] io_f3_resp_0_bits_u_REG; // @[tage.scala:105:38]
assign io_f3_resp_0_bits_u_0 = io_f3_resp_0_bits_u_REG; // @[tage.scala:24:7, :105:38]
reg [2:0] io_f3_resp_0_bits_ctr_REG; // @[tage.scala:106:38]
assign io_f3_resp_0_bits_ctr_0 = io_f3_resp_0_bits_ctr_REG; // @[tage.scala:24:7, :106:38]
reg io_f3_resp_1_valid_REG; // @[tage.scala:104:38]
assign io_f3_resp_1_valid_0 = io_f3_resp_1_valid_REG; // @[tage.scala:24:7, :104:38]
wire [1:0] _io_f3_resp_1_bits_u_T = {_hi_us_R0_data[1], _lo_us_R0_data[1]}; // @[tage.scala:89:27, :90:27, :105:42]
reg [1:0] io_f3_resp_1_bits_u_REG; // @[tage.scala:105:38]
assign io_f3_resp_1_bits_u_0 = io_f3_resp_1_bits_u_REG; // @[tage.scala:24:7, :105:38]
reg [2:0] io_f3_resp_1_bits_ctr_REG; // @[tage.scala:106:38]
assign io_f3_resp_1_bits_ctr_0 = io_f3_resp_1_bits_ctr_REG; // @[tage.scala:24:7, :106:38]
reg io_f3_resp_2_valid_REG; // @[tage.scala:104:38]
assign io_f3_resp_2_valid_0 = io_f3_resp_2_valid_REG; // @[tage.scala:24:7, :104:38]
wire [1:0] _io_f3_resp_2_bits_u_T = {_hi_us_R0_data[2], _lo_us_R0_data[2]}; // @[tage.scala:89:27, :90:27, :105:42]
reg [1:0] io_f3_resp_2_bits_u_REG; // @[tage.scala:105:38]
assign io_f3_resp_2_bits_u_0 = io_f3_resp_2_bits_u_REG; // @[tage.scala:24:7, :105:38]
reg [2:0] io_f3_resp_2_bits_ctr_REG; // @[tage.scala:106:38]
assign io_f3_resp_2_bits_ctr_0 = io_f3_resp_2_bits_ctr_REG; // @[tage.scala:24:7, :106:38]
reg io_f3_resp_3_valid_REG; // @[tage.scala:104:38]
assign io_f3_resp_3_valid_0 = io_f3_resp_3_valid_REG; // @[tage.scala:24:7, :104:38]
wire [1:0] _io_f3_resp_3_bits_u_T = {_hi_us_R0_data[3], _lo_us_R0_data[3]}; // @[tage.scala:89:27, :90:27, :105:42]
reg [1:0] io_f3_resp_3_bits_u_REG; // @[tage.scala:105:38]
assign io_f3_resp_3_bits_u_0 = io_f3_resp_3_bits_u_REG; // @[tage.scala:24:7, :105:38]
reg [2:0] io_f3_resp_3_bits_ctr_REG; // @[tage.scala:106:38]
assign io_f3_resp_3_bits_ctr_0 = io_f3_resp_3_bits_ctr_REG; // @[tage.scala:24:7, :106:38]
reg [19:0] clear_u_ctr; // @[tage.scala:109:28]
wire [20:0] _clear_u_ctr_T = {1'h0, clear_u_ctr} + 21'h1; // @[tage.scala:109:28, :110:85]
wire [19:0] _clear_u_ctr_T_1 = _clear_u_ctr_T[19:0]; // @[tage.scala:110:85]
wire [10:0] _doing_clear_u_T = clear_u_ctr[10:0]; // @[tage.scala:109:28, :112:34]
wire doing_clear_u = _doing_clear_u_T == 11'h0; // @[tage.scala:112:{34,61}]
wire _doing_clear_u_hi_T = clear_u_ctr[19]; // @[tage.scala:109:28, :113:54]
wire _doing_clear_u_lo_T = clear_u_ctr[19]; // @[tage.scala:109:28, :113:54, :114:54]
wire _doing_clear_u_hi_T_1 = _doing_clear_u_hi_T; // @[tage.scala:113:{54,95}]
wire doing_clear_u_hi = doing_clear_u & _doing_clear_u_hi_T_1; // @[tage.scala:112:61, :113:{40,95}]
wire _doing_clear_u_lo_T_1 = ~_doing_clear_u_lo_T; // @[tage.scala:114:{54,95}]
wire doing_clear_u_lo = doing_clear_u & _doing_clear_u_lo_T_1; // @[tage.scala:112:61, :114:{40,95}]
wire [8:0] clear_u_idx = clear_u_ctr[19:11]; // @[tage.scala:109:28, :115:33]
wire [7:0] idx_history_1 = io_update_hist_0[7:0]; // @[tage.scala:24:7, :53:11]
wire [7:0] tag_history_1 = io_update_hist_0[7:0]; // @[tage.scala:24:7, :53:11]
wire [28:0] _tag_T_2 = io_update_pc_0[39:11]; // @[frontend.scala:162:35]
wire [36:0] _idx_T_1 = {_tag_T_2, io_update_pc_0[10:3] ^ idx_history_1}; // @[frontend.scala:162:35]
wire [7:0] update_idx = _idx_T_1[7:0]; // @[tage.scala:60:{29,43}]
wire [28:0] _tag_T_3 = {_tag_T_2[28:8], _tag_T_2[7:0] ^ tag_history_1}; // @[tage.scala:53:11, :62:{30,50}]
wire [7:0] update_tag = _tag_T_3[7:0]; // @[tage.scala:62:{50,64}]
wire [7:0] update_wdata_0_tag = update_tag; // @[tage.scala:62:64, :119:26]
wire [7:0] update_wdata_1_tag = update_tag; // @[tage.scala:62:64, :119:26]
wire [7:0] update_wdata_2_tag = update_tag; // @[tage.scala:62:64, :119:26]
wire [7:0] update_wdata_3_tag = update_tag; // @[tage.scala:62:64, :119:26]
wire [2:0] _update_wdata_0_ctr_T_22; // @[tage.scala:155:33]
wire [2:0] _update_wdata_1_ctr_T_22; // @[tage.scala:155:33]
wire [2:0] _update_wdata_2_ctr_T_22; // @[tage.scala:155:33]
wire [2:0] _update_wdata_3_ctr_T_22; // @[tage.scala:155:33]
wire [2:0] update_wdata_0_ctr; // @[tage.scala:119:26]
wire [2:0] update_wdata_1_ctr; // @[tage.scala:119:26]
wire [2:0] update_wdata_2_ctr; // @[tage.scala:119:26]
wire [2:0] update_wdata_3_ctr; // @[tage.scala:119:26]
wire [8:0] hi = {1'h1, update_wdata_0_tag}; // @[tage.scala:119:26, :123:102]
wire [8:0] hi_1 = {1'h1, update_wdata_1_tag}; // @[tage.scala:119:26, :123:102]
wire [8:0] hi_2 = {1'h1, update_wdata_2_tag}; // @[tage.scala:119:26, :123:102]
wire [8:0] hi_3 = {1'h1, update_wdata_3_tag}; // @[tage.scala:119:26, :123:102]
assign table_MPORT_data_0 = doing_reset ? 12'h0 : {hi, update_wdata_0_ctr}; // @[tage.scala:72:28, :119:26, :123:{8,102}]
assign table_MPORT_data_1 = doing_reset ? 12'h0 : {hi_1, update_wdata_1_ctr}; // @[tage.scala:72:28, :119:26, :123:{8,102}]
assign table_MPORT_data_2 = doing_reset ? 12'h0 : {hi_2, update_wdata_2_ctr}; // @[tage.scala:72:28, :119:26, :123:{8,102}]
assign table_MPORT_data_3 = doing_reset ? 12'h0 : {hi_3, update_wdata_3_ctr}; // @[tage.scala:72:28, :119:26, :123:{8,102}]
wire [1:0] lo = {io_update_mask_1_0, io_update_mask_0_0}; // @[tage.scala:24:7, :124:90]
wire [1:0] hi_4 = {io_update_mask_3_0, io_update_mask_2_0}; // @[tage.scala:24:7, :124:90]
wire _update_hi_wdata_0_T; // @[tage.scala:166:44]
wire _update_hi_wdata_1_T; // @[tage.scala:166:44]
wire _update_hi_wdata_2_T; // @[tage.scala:166:44]
wire _update_hi_wdata_3_T; // @[tage.scala:166:44]
wire update_hi_wdata_0; // @[tage.scala:127:29]
wire update_hi_wdata_1; // @[tage.scala:127:29]
wire update_hi_wdata_2; // @[tage.scala:127:29]
wire update_hi_wdata_3; // @[tage.scala:127:29]
wire _T_20 = doing_reset | doing_clear_u_hi; // @[tage.scala:72:28, :113:40, :130:21]
assign hi_us_MPORT_1_data_0 = ~_T_20 & update_hi_wdata_0; // @[tage.scala:127:29, :130:{8,21}]
assign hi_us_MPORT_1_data_1 = ~_T_20 & update_hi_wdata_1; // @[tage.scala:127:29, :130:{8,21}]
assign hi_us_MPORT_1_data_2 = ~_T_20 & update_hi_wdata_2; // @[tage.scala:127:29, :130:{8,21}]
assign hi_us_MPORT_1_data_3 = ~_T_20 & update_hi_wdata_3; // @[tage.scala:127:29, :130:{8,21}]
wire [1:0] _GEN = {io_update_u_mask_1_0, io_update_u_mask_0_0}; // @[tage.scala:24:7, :131:80]
wire [1:0] lo_1; // @[tage.scala:131:80]
assign lo_1 = _GEN; // @[tage.scala:131:80]
wire [1:0] lo_2; // @[tage.scala:138:80]
assign lo_2 = _GEN; // @[tage.scala:131:80, :138:80]
wire [1:0] _GEN_0 = {io_update_u_mask_3_0, io_update_u_mask_2_0}; // @[tage.scala:24:7, :131:80]
wire [1:0] hi_5; // @[tage.scala:131:80]
assign hi_5 = _GEN_0; // @[tage.scala:131:80]
wire [1:0] hi_6; // @[tage.scala:138:80]
assign hi_6 = _GEN_0; // @[tage.scala:131:80, :138:80]
wire _update_lo_wdata_0_T; // @[tage.scala:167:44]
wire _update_lo_wdata_1_T; // @[tage.scala:167:44]
wire _update_lo_wdata_2_T; // @[tage.scala:167:44]
wire _update_lo_wdata_3_T; // @[tage.scala:167:44]
wire update_lo_wdata_0; // @[tage.scala:134:29]
wire update_lo_wdata_1; // @[tage.scala:134:29]
wire update_lo_wdata_2; // @[tage.scala:134:29]
wire update_lo_wdata_3; // @[tage.scala:134:29]
wire _T_33 = doing_reset | doing_clear_u_lo; // @[tage.scala:72:28, :114:40, :137:21]
assign lo_us_MPORT_2_data_0 = ~_T_33 & update_lo_wdata_0; // @[tage.scala:134:29, :137:{8,21}]
assign lo_us_MPORT_2_data_1 = ~_T_33 & update_lo_wdata_1; // @[tage.scala:134:29, :137:{8,21}]
assign lo_us_MPORT_2_data_2 = ~_T_33 & update_lo_wdata_2; // @[tage.scala:134:29, :137:{8,21}]
assign lo_us_MPORT_2_data_3 = ~_T_33 & update_lo_wdata_3; // @[tage.scala:134:29, :137:{8,21}]
reg [7:0] wrbypass_tags_0; // @[tage.scala:141:29]
reg [7:0] wrbypass_tags_1; // @[tage.scala:141:29]
reg [7:0] wrbypass_idxs_0; // @[tage.scala:142:29]
reg [7:0] wrbypass_idxs_1; // @[tage.scala:142:29]
reg [2:0] wrbypass_0_0; // @[tage.scala:143:29]
reg [2:0] wrbypass_0_1; // @[tage.scala:143:29]
reg [2:0] wrbypass_0_2; // @[tage.scala:143:29]
reg [2:0] wrbypass_0_3; // @[tage.scala:143:29]
reg [2:0] wrbypass_1_0; // @[tage.scala:143:29]
reg [2:0] wrbypass_1_1; // @[tage.scala:143:29]
reg [2:0] wrbypass_1_2; // @[tage.scala:143:29]
reg [2:0] wrbypass_1_3; // @[tage.scala:143:29]
reg wrbypass_enq_idx; // @[tage.scala:144:33]
wire _wrbypass_hits_T = ~doing_reset; // @[tage.scala:72:28, :100:83, :147:5]
wire _wrbypass_hits_T_1 = wrbypass_tags_0 == update_tag; // @[tage.scala:62:64, :141:29, :148:22]
wire _wrbypass_hits_T_2 = _wrbypass_hits_T & _wrbypass_hits_T_1; // @[tage.scala:147:{5,18}, :148:22]
wire _wrbypass_hits_T_3 = wrbypass_idxs_0 == update_idx; // @[tage.scala:60:43, :142:29, :149:22]
wire _wrbypass_hits_T_4 = _wrbypass_hits_T_2 & _wrbypass_hits_T_3; // @[tage.scala:147:18, :148:37, :149:22]
wire wrbypass_hits_0 = _wrbypass_hits_T_4; // @[tage.scala:146:33, :148:37]
wire _wrbypass_hits_T_5 = ~doing_reset; // @[tage.scala:72:28, :100:83, :147:5]
wire _wrbypass_hits_T_6 = wrbypass_tags_1 == update_tag; // @[tage.scala:62:64, :141:29, :148:22]
wire _wrbypass_hits_T_7 = _wrbypass_hits_T_5 & _wrbypass_hits_T_6; // @[tage.scala:147:{5,18}, :148:22]
wire _wrbypass_hits_T_8 = wrbypass_idxs_1 == update_idx; // @[tage.scala:60:43, :142:29, :149:22]
wire _wrbypass_hits_T_9 = _wrbypass_hits_T_7 & _wrbypass_hits_T_8; // @[tage.scala:147:18, :148:37, :149:22]
wire wrbypass_hits_1 = _wrbypass_hits_T_9; // @[tage.scala:146:33, :148:37]
wire wrbypass_hit = wrbypass_hits_0 | wrbypass_hits_1; // @[tage.scala:146:33, :151:48]
wire wrbypass_hit_idx = ~wrbypass_hits_0; // @[Mux.scala:50:70]
wire [2:0] _update_wdata_0_ctr_T = io_update_taken_0_0 ? 3'h4 : 3'h3; // @[tage.scala:24:7, :156:10]
wire _update_wdata_0_ctr_T_1 = ~io_update_taken_0_0; // @[tage.scala:24:7, :67:9]
wire [2:0] _GEN_1 = wrbypass_hit_idx ? wrbypass_1_0 : wrbypass_0_0; // @[Mux.scala:50:70]
wire [2:0] _GEN_2 = wrbypass_hit_idx ? wrbypass_1_1 : wrbypass_0_1; // @[Mux.scala:50:70]
wire [2:0] _GEN_3 = wrbypass_hit_idx ? wrbypass_1_2 : wrbypass_0_2; // @[Mux.scala:50:70]
wire [2:0] _GEN_4 = wrbypass_hit_idx ? wrbypass_1_3 : wrbypass_0_3; // @[Mux.scala:50:70]
wire _update_wdata_0_ctr_T_2 = _GEN_1 == 3'h0; // @[tage.scala:67:25]
wire [3:0] _GEN_5 = {1'h0, _GEN_1}; // @[tage.scala:67:{25,43}]
wire [3:0] _update_wdata_0_ctr_T_3 = _GEN_5 - 4'h1; // @[tage.scala:67:43]
wire [2:0] _update_wdata_0_ctr_T_4 = _update_wdata_0_ctr_T_3[2:0]; // @[tage.scala:67:43]
wire [2:0] _update_wdata_0_ctr_T_5 = _update_wdata_0_ctr_T_2 ? 3'h0 : _update_wdata_0_ctr_T_4; // @[tage.scala:67:{20,25,43}]
wire _update_wdata_0_ctr_T_6 = &_GEN_1; // @[tage.scala:67:25, :68:25]
wire [3:0] _update_wdata_0_ctr_T_7 = _GEN_5 + 4'h1; // @[tage.scala:67:43, :68:43]
wire [2:0] _update_wdata_0_ctr_T_8 = _update_wdata_0_ctr_T_7[2:0]; // @[tage.scala:68:43]
wire [2:0] _update_wdata_0_ctr_T_9 = _update_wdata_0_ctr_T_6 ? 3'h7 : _update_wdata_0_ctr_T_8; // @[tage.scala:68:{20,25,43}]
wire [2:0] _update_wdata_0_ctr_T_10 = _update_wdata_0_ctr_T_1 ? _update_wdata_0_ctr_T_5 : _update_wdata_0_ctr_T_9; // @[tage.scala:67:{8,9,20}, :68:20]
wire _update_wdata_0_ctr_T_11 = ~io_update_taken_0_0; // @[tage.scala:24:7, :67:9]
wire _update_wdata_0_ctr_T_12 = io_update_old_ctr_0_0 == 3'h0; // @[tage.scala:24:7, :67:25]
wire [3:0] _GEN_6 = {1'h0, io_update_old_ctr_0_0}; // @[tage.scala:24:7, :67:43]
wire [3:0] _update_wdata_0_ctr_T_13 = _GEN_6 - 4'h1; // @[tage.scala:67:43]
wire [2:0] _update_wdata_0_ctr_T_14 = _update_wdata_0_ctr_T_13[2:0]; // @[tage.scala:67:43]
wire [2:0] _update_wdata_0_ctr_T_15 = _update_wdata_0_ctr_T_12 ? 3'h0 : _update_wdata_0_ctr_T_14; // @[tage.scala:67:{20,25,43}]
wire _update_wdata_0_ctr_T_16 = &io_update_old_ctr_0_0; // @[tage.scala:24:7, :68:25]
wire [3:0] _update_wdata_0_ctr_T_17 = _GEN_6 + 4'h1; // @[tage.scala:67:43, :68:43]
wire [2:0] _update_wdata_0_ctr_T_18 = _update_wdata_0_ctr_T_17[2:0]; // @[tage.scala:68:43]
wire [2:0] _update_wdata_0_ctr_T_19 = _update_wdata_0_ctr_T_16 ? 3'h7 : _update_wdata_0_ctr_T_18; // @[tage.scala:68:{20,25,43}]
wire [2:0] _update_wdata_0_ctr_T_20 = _update_wdata_0_ctr_T_11 ? _update_wdata_0_ctr_T_15 : _update_wdata_0_ctr_T_19; // @[tage.scala:67:{8,9,20}, :68:20]
wire [2:0] _update_wdata_0_ctr_T_21 = wrbypass_hit ? _update_wdata_0_ctr_T_10 : _update_wdata_0_ctr_T_20; // @[tage.scala:67:8, :151:48, :159:10]
assign _update_wdata_0_ctr_T_22 = io_update_alloc_0_0 ? _update_wdata_0_ctr_T : _update_wdata_0_ctr_T_21; // @[tage.scala:24:7, :155:33, :156:10, :159:10]
assign update_wdata_0_ctr = _update_wdata_0_ctr_T_22; // @[tage.scala:119:26, :155:33]
assign _update_hi_wdata_0_T = io_update_u_0_0[1]; // @[tage.scala:24:7, :166:44]
assign update_hi_wdata_0 = _update_hi_wdata_0_T; // @[tage.scala:127:29, :166:44]
assign _update_lo_wdata_0_T = io_update_u_0_0[0]; // @[tage.scala:24:7, :167:44]
assign update_lo_wdata_0 = _update_lo_wdata_0_T; // @[tage.scala:134:29, :167:44]
wire [2:0] _update_wdata_1_ctr_T = io_update_taken_1_0 ? 3'h4 : 3'h3; // @[tage.scala:24:7, :156:10]
wire _update_wdata_1_ctr_T_1 = ~io_update_taken_1_0; // @[tage.scala:24:7, :67:9]
wire _update_wdata_1_ctr_T_2 = _GEN_2 == 3'h0; // @[tage.scala:67:25]
wire [3:0] _GEN_7 = {1'h0, _GEN_2}; // @[tage.scala:67:{25,43}]
wire [3:0] _update_wdata_1_ctr_T_3 = _GEN_7 - 4'h1; // @[tage.scala:67:43]
wire [2:0] _update_wdata_1_ctr_T_4 = _update_wdata_1_ctr_T_3[2:0]; // @[tage.scala:67:43]
wire [2:0] _update_wdata_1_ctr_T_5 = _update_wdata_1_ctr_T_2 ? 3'h0 : _update_wdata_1_ctr_T_4; // @[tage.scala:67:{20,25,43}]
wire _update_wdata_1_ctr_T_6 = &_GEN_2; // @[tage.scala:67:25, :68:25]
wire [3:0] _update_wdata_1_ctr_T_7 = _GEN_7 + 4'h1; // @[tage.scala:67:43, :68:43]
wire [2:0] _update_wdata_1_ctr_T_8 = _update_wdata_1_ctr_T_7[2:0]; // @[tage.scala:68:43]
wire [2:0] _update_wdata_1_ctr_T_9 = _update_wdata_1_ctr_T_6 ? 3'h7 : _update_wdata_1_ctr_T_8; // @[tage.scala:68:{20,25,43}]
wire [2:0] _update_wdata_1_ctr_T_10 = _update_wdata_1_ctr_T_1 ? _update_wdata_1_ctr_T_5 : _update_wdata_1_ctr_T_9; // @[tage.scala:67:{8,9,20}, :68:20]
wire _update_wdata_1_ctr_T_11 = ~io_update_taken_1_0; // @[tage.scala:24:7, :67:9]
wire _update_wdata_1_ctr_T_12 = io_update_old_ctr_1_0 == 3'h0; // @[tage.scala:24:7, :67:25]
wire [3:0] _GEN_8 = {1'h0, io_update_old_ctr_1_0}; // @[tage.scala:24:7, :67:43]
wire [3:0] _update_wdata_1_ctr_T_13 = _GEN_8 - 4'h1; // @[tage.scala:67:43]
wire [2:0] _update_wdata_1_ctr_T_14 = _update_wdata_1_ctr_T_13[2:0]; // @[tage.scala:67:43]
wire [2:0] _update_wdata_1_ctr_T_15 = _update_wdata_1_ctr_T_12 ? 3'h0 : _update_wdata_1_ctr_T_14; // @[tage.scala:67:{20,25,43}]
wire _update_wdata_1_ctr_T_16 = &io_update_old_ctr_1_0; // @[tage.scala:24:7, :68:25]
wire [3:0] _update_wdata_1_ctr_T_17 = _GEN_8 + 4'h1; // @[tage.scala:67:43, :68:43]
wire [2:0] _update_wdata_1_ctr_T_18 = _update_wdata_1_ctr_T_17[2:0]; // @[tage.scala:68:43]
wire [2:0] _update_wdata_1_ctr_T_19 = _update_wdata_1_ctr_T_16 ? 3'h7 : _update_wdata_1_ctr_T_18; // @[tage.scala:68:{20,25,43}]
wire [2:0] _update_wdata_1_ctr_T_20 = _update_wdata_1_ctr_T_11 ? _update_wdata_1_ctr_T_15 : _update_wdata_1_ctr_T_19; // @[tage.scala:67:{8,9,20}, :68:20]
wire [2:0] _update_wdata_1_ctr_T_21 = wrbypass_hit ? _update_wdata_1_ctr_T_10 : _update_wdata_1_ctr_T_20; // @[tage.scala:67:8, :151:48, :159:10]
assign _update_wdata_1_ctr_T_22 = io_update_alloc_1_0 ? _update_wdata_1_ctr_T : _update_wdata_1_ctr_T_21; // @[tage.scala:24:7, :155:33, :156:10, :159:10]
assign update_wdata_1_ctr = _update_wdata_1_ctr_T_22; // @[tage.scala:119:26, :155:33]
assign _update_hi_wdata_1_T = io_update_u_1_0[1]; // @[tage.scala:24:7, :166:44]
assign update_hi_wdata_1 = _update_hi_wdata_1_T; // @[tage.scala:127:29, :166:44]
assign _update_lo_wdata_1_T = io_update_u_1_0[0]; // @[tage.scala:24:7, :167:44]
assign update_lo_wdata_1 = _update_lo_wdata_1_T; // @[tage.scala:134:29, :167:44]
wire [2:0] _update_wdata_2_ctr_T = io_update_taken_2_0 ? 3'h4 : 3'h3; // @[tage.scala:24:7, :156:10]
wire _update_wdata_2_ctr_T_1 = ~io_update_taken_2_0; // @[tage.scala:24:7, :67:9]
wire _update_wdata_2_ctr_T_2 = _GEN_3 == 3'h0; // @[tage.scala:67:25]
wire [3:0] _GEN_9 = {1'h0, _GEN_3}; // @[tage.scala:67:{25,43}]
wire [3:0] _update_wdata_2_ctr_T_3 = _GEN_9 - 4'h1; // @[tage.scala:67:43]
wire [2:0] _update_wdata_2_ctr_T_4 = _update_wdata_2_ctr_T_3[2:0]; // @[tage.scala:67:43]
wire [2:0] _update_wdata_2_ctr_T_5 = _update_wdata_2_ctr_T_2 ? 3'h0 : _update_wdata_2_ctr_T_4; // @[tage.scala:67:{20,25,43}]
wire _update_wdata_2_ctr_T_6 = &_GEN_3; // @[tage.scala:67:25, :68:25]
wire [3:0] _update_wdata_2_ctr_T_7 = _GEN_9 + 4'h1; // @[tage.scala:67:43, :68:43]
wire [2:0] _update_wdata_2_ctr_T_8 = _update_wdata_2_ctr_T_7[2:0]; // @[tage.scala:68:43]
wire [2:0] _update_wdata_2_ctr_T_9 = _update_wdata_2_ctr_T_6 ? 3'h7 : _update_wdata_2_ctr_T_8; // @[tage.scala:68:{20,25,43}]
wire [2:0] _update_wdata_2_ctr_T_10 = _update_wdata_2_ctr_T_1 ? _update_wdata_2_ctr_T_5 : _update_wdata_2_ctr_T_9; // @[tage.scala:67:{8,9,20}, :68:20]
wire _update_wdata_2_ctr_T_11 = ~io_update_taken_2_0; // @[tage.scala:24:7, :67:9]
wire _update_wdata_2_ctr_T_12 = io_update_old_ctr_2_0 == 3'h0; // @[tage.scala:24:7, :67:25]
wire [3:0] _GEN_10 = {1'h0, io_update_old_ctr_2_0}; // @[tage.scala:24:7, :67:43]
wire [3:0] _update_wdata_2_ctr_T_13 = _GEN_10 - 4'h1; // @[tage.scala:67:43]
wire [2:0] _update_wdata_2_ctr_T_14 = _update_wdata_2_ctr_T_13[2:0]; // @[tage.scala:67:43]
wire [2:0] _update_wdata_2_ctr_T_15 = _update_wdata_2_ctr_T_12 ? 3'h0 : _update_wdata_2_ctr_T_14; // @[tage.scala:67:{20,25,43}]
wire _update_wdata_2_ctr_T_16 = &io_update_old_ctr_2_0; // @[tage.scala:24:7, :68:25]
wire [3:0] _update_wdata_2_ctr_T_17 = _GEN_10 + 4'h1; // @[tage.scala:67:43, :68:43]
wire [2:0] _update_wdata_2_ctr_T_18 = _update_wdata_2_ctr_T_17[2:0]; // @[tage.scala:68:43]
wire [2:0] _update_wdata_2_ctr_T_19 = _update_wdata_2_ctr_T_16 ? 3'h7 : _update_wdata_2_ctr_T_18; // @[tage.scala:68:{20,25,43}]
wire [2:0] _update_wdata_2_ctr_T_20 = _update_wdata_2_ctr_T_11 ? _update_wdata_2_ctr_T_15 : _update_wdata_2_ctr_T_19; // @[tage.scala:67:{8,9,20}, :68:20]
wire [2:0] _update_wdata_2_ctr_T_21 = wrbypass_hit ? _update_wdata_2_ctr_T_10 : _update_wdata_2_ctr_T_20; // @[tage.scala:67:8, :151:48, :159:10]
assign _update_wdata_2_ctr_T_22 = io_update_alloc_2_0 ? _update_wdata_2_ctr_T : _update_wdata_2_ctr_T_21; // @[tage.scala:24:7, :155:33, :156:10, :159:10]
assign update_wdata_2_ctr = _update_wdata_2_ctr_T_22; // @[tage.scala:119:26, :155:33]
assign _update_hi_wdata_2_T = io_update_u_2_0[1]; // @[tage.scala:24:7, :166:44]
assign update_hi_wdata_2 = _update_hi_wdata_2_T; // @[tage.scala:127:29, :166:44]
assign _update_lo_wdata_2_T = io_update_u_2_0[0]; // @[tage.scala:24:7, :167:44]
assign update_lo_wdata_2 = _update_lo_wdata_2_T; // @[tage.scala:134:29, :167:44]
wire [2:0] _update_wdata_3_ctr_T = io_update_taken_3_0 ? 3'h4 : 3'h3; // @[tage.scala:24:7, :156:10]
wire _update_wdata_3_ctr_T_1 = ~io_update_taken_3_0; // @[tage.scala:24:7, :67:9]
wire _update_wdata_3_ctr_T_2 = _GEN_4 == 3'h0; // @[tage.scala:67:25]
wire [3:0] _GEN_11 = {1'h0, _GEN_4}; // @[tage.scala:67:{25,43}]
wire [3:0] _update_wdata_3_ctr_T_3 = _GEN_11 - 4'h1; // @[tage.scala:67:43]
wire [2:0] _update_wdata_3_ctr_T_4 = _update_wdata_3_ctr_T_3[2:0]; // @[tage.scala:67:43]
wire [2:0] _update_wdata_3_ctr_T_5 = _update_wdata_3_ctr_T_2 ? 3'h0 : _update_wdata_3_ctr_T_4; // @[tage.scala:67:{20,25,43}]
wire _update_wdata_3_ctr_T_6 = &_GEN_4; // @[tage.scala:67:25, :68:25]
wire [3:0] _update_wdata_3_ctr_T_7 = _GEN_11 + 4'h1; // @[tage.scala:67:43, :68:43]
wire [2:0] _update_wdata_3_ctr_T_8 = _update_wdata_3_ctr_T_7[2:0]; // @[tage.scala:68:43]
wire [2:0] _update_wdata_3_ctr_T_9 = _update_wdata_3_ctr_T_6 ? 3'h7 : _update_wdata_3_ctr_T_8; // @[tage.scala:68:{20,25,43}]
wire [2:0] _update_wdata_3_ctr_T_10 = _update_wdata_3_ctr_T_1 ? _update_wdata_3_ctr_T_5 : _update_wdata_3_ctr_T_9; // @[tage.scala:67:{8,9,20}, :68:20]
wire _update_wdata_3_ctr_T_11 = ~io_update_taken_3_0; // @[tage.scala:24:7, :67:9]
wire _update_wdata_3_ctr_T_12 = io_update_old_ctr_3_0 == 3'h0; // @[tage.scala:24:7, :67:25]
wire [3:0] _GEN_12 = {1'h0, io_update_old_ctr_3_0}; // @[tage.scala:24:7, :67:43]
wire [3:0] _update_wdata_3_ctr_T_13 = _GEN_12 - 4'h1; // @[tage.scala:67:43]
wire [2:0] _update_wdata_3_ctr_T_14 = _update_wdata_3_ctr_T_13[2:0]; // @[tage.scala:67:43]
wire [2:0] _update_wdata_3_ctr_T_15 = _update_wdata_3_ctr_T_12 ? 3'h0 : _update_wdata_3_ctr_T_14; // @[tage.scala:67:{20,25,43}]
wire _update_wdata_3_ctr_T_16 = &io_update_old_ctr_3_0; // @[tage.scala:24:7, :68:25]
wire [3:0] _update_wdata_3_ctr_T_17 = _GEN_12 + 4'h1; // @[tage.scala:67:43, :68:43]
wire [2:0] _update_wdata_3_ctr_T_18 = _update_wdata_3_ctr_T_17[2:0]; // @[tage.scala:68:43]
wire [2:0] _update_wdata_3_ctr_T_19 = _update_wdata_3_ctr_T_16 ? 3'h7 : _update_wdata_3_ctr_T_18; // @[tage.scala:68:{20,25,43}]
wire [2:0] _update_wdata_3_ctr_T_20 = _update_wdata_3_ctr_T_11 ? _update_wdata_3_ctr_T_15 : _update_wdata_3_ctr_T_19; // @[tage.scala:67:{8,9,20}, :68:20]
wire [2:0] _update_wdata_3_ctr_T_21 = wrbypass_hit ? _update_wdata_3_ctr_T_10 : _update_wdata_3_ctr_T_20; // @[tage.scala:67:8, :151:48, :159:10]
assign _update_wdata_3_ctr_T_22 = io_update_alloc_3_0 ? _update_wdata_3_ctr_T : _update_wdata_3_ctr_T_21; // @[tage.scala:24:7, :155:33, :156:10, :159:10]
assign update_wdata_3_ctr = _update_wdata_3_ctr_T_22; // @[tage.scala:119:26, :155:33]
assign _update_hi_wdata_3_T = io_update_u_3_0[1]; // @[tage.scala:24:7, :166:44]
assign update_hi_wdata_3 = _update_hi_wdata_3_T; // @[tage.scala:127:29, :166:44]
assign _update_lo_wdata_3_T = io_update_u_3_0[0]; // @[tage.scala:24:7, :167:44]
assign update_lo_wdata_3 = _update_lo_wdata_3_T; // @[tage.scala:134:29, :167:44]
wire [1:0] _wrbypass_enq_idx_T = {1'h0, wrbypass_enq_idx} + 2'h1; // @[util.scala:203:14]
wire _wrbypass_enq_idx_T_1 = _wrbypass_enq_idx_T[0]; // @[util.scala:203:14]
wire _wrbypass_enq_idx_T_2 = _wrbypass_enq_idx_T_1; // @[util.scala:203:{14,20}]
wire _T_44 = io_update_mask_0_0 | io_update_mask_1_0 | io_update_mask_2_0 | io_update_mask_3_0; // @[tage.scala:24:7, :170:32]
wire _GEN_13 = wrbypass_hit ? wrbypass_hit_idx : wrbypass_enq_idx; // @[Mux.scala:50:70]
wire _GEN_14 = ~_T_44 | wrbypass_hit | wrbypass_enq_idx; // @[tage.scala:141:29, :143:29, :144:33, :151:48, :170:{32,38}, :171:39, :175:39]
wire _GEN_15 = ~_T_44 | wrbypass_hit | ~wrbypass_enq_idx; // @[tage.scala:141:29, :143:29, :144:33, :151:48, :170:{32,38}, :171:39, :175:39]
always @(posedge clock) begin // @[tage.scala:24:7]
if (reset) begin // @[tage.scala:24:7]
doing_reset <= 1'h1; // @[tage.scala:72:28]
reset_idx <= 8'h0; // @[tage.scala:73:26]
clear_u_ctr <= 20'h0; // @[tage.scala:109:28]
wrbypass_enq_idx <= 1'h0; // @[tage.scala:144:33]
end
else begin // @[tage.scala:24:7]
doing_reset <= reset_idx != 8'hFF & doing_reset; // @[tage.scala:72:28, :73:26, :75:{19,36,50}]
reset_idx <= _reset_idx_T_1; // @[tage.scala:73:26, :74:26]
clear_u_ctr <= doing_reset ? 20'h1 : _clear_u_ctr_T_1; // @[tage.scala:72:28, :109:28, :110:{22,36,70,85}]
if (~_T_44 | wrbypass_hit) begin // @[tage.scala:143:29, :144:33, :151:48, :170:{32,38}, :171:39]
end
else // @[tage.scala:144:33, :170:38, :171:39]
wrbypass_enq_idx <= _wrbypass_enq_idx_T_2; // @[util.scala:203:20]
end
s2_tag <= s1_tag; // @[tage.scala:62:64, :95:29]
io_f3_resp_0_valid_REG <= s2_req_rhits_0; // @[tage.scala:100:29, :104:38]
io_f3_resp_0_bits_u_REG <= _io_f3_resp_0_bits_u_T; // @[tage.scala:105:{38,42}]
io_f3_resp_0_bits_ctr_REG <= s2_req_rtage_0_ctr; // @[tage.scala:97:29, :106:38]
io_f3_resp_1_valid_REG <= s2_req_rhits_1; // @[tage.scala:100:29, :104:38]
io_f3_resp_1_bits_u_REG <= _io_f3_resp_1_bits_u_T; // @[tage.scala:105:{38,42}]
io_f3_resp_1_bits_ctr_REG <= s2_req_rtage_1_ctr; // @[tage.scala:97:29, :106:38]
io_f3_resp_2_valid_REG <= s2_req_rhits_2; // @[tage.scala:100:29, :104:38]
io_f3_resp_2_bits_u_REG <= _io_f3_resp_2_bits_u_T; // @[tage.scala:105:{38,42}]
io_f3_resp_2_bits_ctr_REG <= s2_req_rtage_2_ctr; // @[tage.scala:97:29, :106:38]
io_f3_resp_3_valid_REG <= s2_req_rhits_3; // @[tage.scala:100:29, :104:38]
io_f3_resp_3_bits_u_REG <= _io_f3_resp_3_bits_u_T; // @[tage.scala:105:{38,42}]
io_f3_resp_3_bits_ctr_REG <= s2_req_rtage_3_ctr; // @[tage.scala:97:29, :106:38]
if (_GEN_14) begin // @[tage.scala:141:29, :170:38, :171:39, :175:39]
end
else // @[tage.scala:141:29, :170:38, :171:39, :175:39]
wrbypass_tags_0 <= update_tag; // @[tage.scala:62:64, :141:29]
if (_GEN_15) begin // @[tage.scala:141:29, :170:38, :171:39, :175:39]
end
else // @[tage.scala:141:29, :170:38, :171:39, :175:39]
wrbypass_tags_1 <= update_tag; // @[tage.scala:62:64, :141:29]
if (_GEN_14) begin // @[tage.scala:141:29, :142:29, :170:38, :171:39, :175:39, :176:39]
end
else // @[tage.scala:142:29, :170:38, :171:39, :176:39]
wrbypass_idxs_0 <= update_idx; // @[tage.scala:60:43, :142:29]
if (_GEN_15) begin // @[tage.scala:141:29, :142:29, :170:38, :171:39, :175:39, :176:39]
end
else // @[tage.scala:142:29, :170:38, :171:39, :176:39]
wrbypass_idxs_1 <= update_idx; // @[tage.scala:60:43, :142:29]
if (~_T_44 | _GEN_13) begin // @[tage.scala:143:29, :170:{32,38}, :171:39, :172:34, :174:39]
end
else begin // @[tage.scala:143:29, :170:38, :171:39]
wrbypass_0_0 <= update_wdata_0_ctr; // @[tage.scala:119:26, :143:29]
wrbypass_0_1 <= update_wdata_1_ctr; // @[tage.scala:119:26, :143:29]
wrbypass_0_2 <= update_wdata_2_ctr; // @[tage.scala:119:26, :143:29]
wrbypass_0_3 <= update_wdata_3_ctr; // @[tage.scala:119:26, :143:29]
end
if (_T_44 & _GEN_13) begin // @[tage.scala:143:29, :170:{32,38}, :171:39, :172:34, :174:39]
wrbypass_1_0 <= update_wdata_0_ctr; // @[tage.scala:119:26, :143:29]
wrbypass_1_1 <= update_wdata_1_ctr; // @[tage.scala:119:26, :143:29]
wrbypass_1_2 <= update_wdata_2_ctr; // @[tage.scala:119:26, :143:29]
wrbypass_1_3 <= update_wdata_3_ctr; // @[tage.scala:119:26, :143:29]
end
  end // always @(posedge)
hi_us_1 hi_us ( // @[tage.scala:89:27]
.R0_addr (_s2_req_rhius_WIRE), // @[tage.scala:98:32]
.R0_en (io_f1_req_valid_0), // @[tage.scala:24:7]
.R0_clk (clock),
.R0_data (_hi_us_R0_data),
.W0_addr (doing_reset ? reset_idx : doing_clear_u_hi ? clear_u_idx[7:0] : update_idx), // @[tage.scala:60:43, :72:28, :73:26, :113:40, :115:33, :129:{8,36}]
.W0_clk (clock),
.W0_data ({hi_us_MPORT_1_data_3, hi_us_MPORT_1_data_2, hi_us_MPORT_1_data_1, hi_us_MPORT_1_data_0}), // @[tage.scala:89:27, :130:8]
.W0_mask (_T_20 ? 4'hF : {hi_5, lo_1}) // @[tage.scala:130:21, :131:{8,80}]
); // @[tage.scala:89:27]
lo_us_1 lo_us ( // @[tage.scala:90:27]
.R0_addr (_s2_req_rlous_WIRE), // @[tage.scala:99:32]
.R0_en (io_f1_req_valid_0), // @[tage.scala:24:7]
.R0_clk (clock),
.R0_data (_lo_us_R0_data),
.W0_addr (doing_reset ? reset_idx : doing_clear_u_lo ? clear_u_idx[7:0] : update_idx), // @[tage.scala:60:43, :72:28, :73:26, :114:40, :115:33, :136:{8,36}]
.W0_clk (clock),
.W0_data ({lo_us_MPORT_2_data_3, lo_us_MPORT_2_data_2, lo_us_MPORT_2_data_1, lo_us_MPORT_2_data_0}), // @[tage.scala:90:27, :137:8]
.W0_mask (_T_33 ? 4'hF : {hi_6, lo_2}) // @[tage.scala:137:21, :138:{8,80}]
); // @[tage.scala:90:27]
table_1 table_0 ( // @[tage.scala:91:27]
.R0_addr (_s2_req_rtage_WIRE), // @[tage.scala:97:40]
.R0_en (io_f1_req_valid_0), // @[tage.scala:24:7]
.R0_clk (clock),
.R0_data (_table_R0_data),
.W0_addr (doing_reset ? reset_idx : update_idx), // @[tage.scala:60:43, :72:28, :73:26, :122:8]
.W0_clk (clock),
.W0_data ({table_MPORT_data_3, table_MPORT_data_2, table_MPORT_data_1, table_MPORT_data_0}), // @[tage.scala:91:27, :123:8]
.W0_mask (doing_reset ? 4'hF : {hi_4, lo}) // @[tage.scala:72:28, :124:{8,90}]
); // @[tage.scala:91:27]
assign io_f3_resp_0_valid = io_f3_resp_0_valid_0; // @[tage.scala:24:7]
assign io_f3_resp_0_bits_ctr = io_f3_resp_0_bits_ctr_0; // @[tage.scala:24:7]
assign io_f3_resp_0_bits_u = io_f3_resp_0_bits_u_0; // @[tage.scala:24:7]
assign io_f3_resp_1_valid = io_f3_resp_1_valid_0; // @[tage.scala:24:7]
assign io_f3_resp_1_bits_ctr = io_f3_resp_1_bits_ctr_0; // @[tage.scala:24:7]
assign io_f3_resp_1_bits_u = io_f3_resp_1_bits_u_0; // @[tage.scala:24:7]
assign io_f3_resp_2_valid = io_f3_resp_2_valid_0; // @[tage.scala:24:7]
assign io_f3_resp_2_bits_ctr = io_f3_resp_2_bits_ctr_0; // @[tage.scala:24:7]
assign io_f3_resp_2_bits_u = io_f3_resp_2_bits_u_0; // @[tage.scala:24:7]
assign io_f3_resp_3_valid = io_f3_resp_3_valid_0; // @[tage.scala:24:7]
assign io_f3_resp_3_bits_ctr = io_f3_resp_3_bits_ctr_0; // @[tage.scala:24:7]
assign io_f3_resp_3_bits_u = io_f3_resp_3_bits_u_0; // @[tage.scala:24:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File RoundAnyRawFNToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util.Fill
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundAnyRawFNToRecFN(
inExpWidth: Int,
inSigWidth: Int,
outExpWidth: Int,
outSigWidth: Int,
options: Int
)
extends RawModule
{
override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(inExpWidth, inSigWidth))
// (allowed exponent range has limits)
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0)
val effectiveInSigWidth =
if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1
val neverUnderflows =
((options &
(flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact)
) != 0) ||
(inExpWidth < outExpWidth)
val neverOverflows =
((options & flRoundOpt_neverOverflows) != 0) ||
(inExpWidth < outExpWidth)
val outNaNExp = BigInt(7)<<(outExpWidth - 2)
val outInfExp = BigInt(6)<<(outExpWidth - 2)
val outMaxFiniteExp = outInfExp - 1
val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2
val outMinNonzeroExp = outMinNormExp - outSigWidth + 1
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_near_even = (io.roundingMode === round_near_even)
val roundingMode_minMag = (io.roundingMode === round_minMag)
val roundingMode_min = (io.roundingMode === round_min)
val roundingMode_max = (io.roundingMode === round_max)
val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag)
val roundingMode_odd = (io.roundingMode === round_odd)
val roundMagUp =
(roundingMode_min && io.in.sign) || (roundingMode_max && ! io.in.sign)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sAdjustedExp =
if (inExpWidth < outExpWidth)
(io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
)(outExpWidth, 0).zext
else if (inExpWidth == outExpWidth)
io.in.sExp
else
io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
val adjustedSig =
if (inSigWidth <= outSigWidth + 2)
io.in.sig<<(outSigWidth - inSigWidth + 2)
else
(io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ##
io.in.sig(inSigWidth - outSigWidth - 2, 0).orR
)
val doShiftSigDown1 =
if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2)
val common_expOut = Wire(UInt((outExpWidth + 1).W))
val common_fractOut = Wire(UInt((outSigWidth - 1).W))
val common_overflow = Wire(Bool())
val common_totalUnderflow = Wire(Bool())
val common_underflow = Wire(Bool())
val common_inexact = Wire(Bool())
if (
neverOverflows && neverUnderflows
&& (effectiveInSigWidth <= outSigWidth)
) {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1
common_fractOut :=
Mux(doShiftSigDown1,
adjustedSig(outSigWidth + 1, 3),
adjustedSig(outSigWidth, 2)
)
common_overflow := false.B
common_totalUnderflow := false.B
common_underflow := false.B
common_inexact := false.B
} else {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
val roundMask =
if (neverUnderflows)
0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W)
else
(lowMask(
sAdjustedExp(outExpWidth, 0),
outMinNormExp - outSigWidth - 1,
outMinNormExp
) | doShiftSigDown1) ##
3.U(2.W)
val shiftedRoundMask = 0.U(1.W) ## roundMask>>1
val roundPosMask = ~shiftedRoundMask & roundMask
val roundPosBit = (adjustedSig & roundPosMask).orR
val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR
val anyRound = roundPosBit || anyRoundExtra
val roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
roundPosBit) ||
(roundMagUp && anyRound)
val roundedSig: Bits =
Mux(roundIncr,
(((adjustedSig | roundMask)>>2) +& 1.U) &
~Mux(roundingMode_near_even && roundPosBit &&
! anyRoundExtra,
roundMask>>1,
0.U((outSigWidth + 2).W)
),
(adjustedSig & ~roundMask)>>2 |
Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U)
)
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext
common_expOut := sRoundedExp(outExpWidth, 0)
common_fractOut :=
Mux(doShiftSigDown1,
roundedSig(outSigWidth - 1, 1),
roundedSig(outSigWidth - 2, 0)
)
common_overflow :=
(if (neverOverflows) false.B else
//*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?:
(sRoundedExp>>(outExpWidth - 1) >= 3.S))
common_totalUnderflow :=
(if (neverUnderflows) false.B else
//*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?:
(sRoundedExp < outMinNonzeroExp.S))
val unboundedRange_roundPosBit =
Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1))
val unboundedRange_anyRound =
(doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR
val unboundedRange_roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
unboundedRange_roundPosBit) ||
(roundMagUp && unboundedRange_anyRound)
val roundCarry =
Mux(doShiftSigDown1,
roundedSig(outSigWidth + 1),
roundedSig(outSigWidth)
)
common_underflow :=
(if (neverUnderflows) false.B else
common_totalUnderflow ||
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
(anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) &&
Mux(doShiftSigDown1, roundMask(3), roundMask(2)) &&
! ((io.detectTininess === tininess_afterRounding) &&
! Mux(doShiftSigDown1,
roundMask(4),
roundMask(3)
) &&
roundCarry && roundPosBit &&
unboundedRange_roundIncr)))
common_inexact := common_totalUnderflow || anyRound
}
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val isNaNOut = io.invalidExc || io.in.isNaN
val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf
val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero
val overflow = commonCase && common_overflow
val underflow = commonCase && common_underflow
val inexact = overflow || (commonCase && common_inexact)
val overflow_roundMagUp =
roundingMode_near_even || roundingMode_near_maxMag || roundMagUp
val pegMinNonzeroMagOut =
commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd)
val pegMaxFiniteMagOut = overflow && ! overflow_roundMagUp
val notNaN_isInfOut =
notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp)
val signOut = Mux(isNaNOut, false.B, io.in.sign)
val expOut =
(common_expOut &
~Mux(io.in.isZero || common_totalUnderflow,
(BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMinNonzeroMagOut,
~outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMaxFiniteMagOut,
(BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W),
0.U
) &
~Mux(notNaN_isInfOut,
(BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
)) |
Mux(pegMinNonzeroMagOut,
outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) |
Mux(pegMaxFiniteMagOut,
outMaxFiniteExp.U((outExpWidth + 1).W),
0.U
) |
Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) |
Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U)
val fractOut =
Mux(isNaNOut || io.in.isZero || common_totalUnderflow,
Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U),
common_fractOut
) |
Fill(outSigWidth - 1, pegMaxFiniteMagOut)
io.out := signOut ## expOut ## fractOut
io.exceptionFlags :=
io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int)
extends RawModule
{
override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(expWidth, sigWidth + 2))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
expWidth, sigWidth + 2, expWidth, sigWidth, options))
roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc
roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc
roundAnyRawFNToRecFN.io.in := io.in
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
| module RoundAnyRawFNToRecFN_ie6_is32_oe11_os53( // @[RoundAnyRawFNToRecFN.scala:48:5]
input io_in_isZero, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_sign, // @[RoundAnyRawFNToRecFN.scala:58:16]
input [7:0] io_in_sExp, // @[RoundAnyRawFNToRecFN.scala:58:16]
input [32:0] io_in_sig, // @[RoundAnyRawFNToRecFN.scala:58:16]
output [64:0] io_out // @[RoundAnyRawFNToRecFN.scala:58:16]
);
assign io_out = {io_in_sign, {{4{io_in_sExp[7]}}, io_in_sExp} + 12'h7C0 & ~(io_in_isZero ? 12'hE00 : 12'h0), io_in_isZero ? 52'h0 : {io_in_sig[30:0], 21'h0}}; // @[RoundAnyRawFNToRecFN.scala:48:5, :104:25, :140:28, :252:24, :253:{14,18}, :280:12, :281:16, :286:33]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftRegisterPriorityQueue.scala:
package compressacc
import chisel3._
import chisel3.util._
import chisel3.util._
// TODO : support enq & deq at the same cycle
class PriorityQueueStageIO(keyWidth: Int, value: ValueInfo) extends Bundle {
val output_prev = KeyValue(keyWidth, value)
val output_nxt = KeyValue(keyWidth, value)
val input_prev = Flipped(KeyValue(keyWidth, value))
val input_nxt = Flipped(KeyValue(keyWidth, value))
val cmd = Flipped(Valid(UInt(1.W)))
val insert_here = Input(Bool())
val cur_input_keyval = Flipped(KeyValue(keyWidth, value))
val cur_output_keyval = KeyValue(keyWidth, value)
}
class PriorityQueueStage(keyWidth: Int, value: ValueInfo) extends Module {
val io = IO(new PriorityQueueStageIO(keyWidth, value))
dontTouch(io)
val CMD_DEQ = 0.U
val CMD_ENQ = 1.U
val MAX_VALUE = (1 << keyWidth) - 1
val key_reg = RegInit(MAX_VALUE.U(keyWidth.W))
val value_reg = Reg(value)
io.output_prev.key := key_reg
io.output_prev.value := value_reg
io.output_nxt.key := key_reg
io.output_nxt.value := value_reg
io.cur_output_keyval.key := key_reg
io.cur_output_keyval.value := value_reg
when (io.cmd.valid) {
switch (io.cmd.bits) {
is (CMD_DEQ) {
key_reg := io.input_nxt.key
value_reg := io.input_nxt.value
}
is (CMD_ENQ) {
when (io.insert_here) {
key_reg := io.cur_input_keyval.key
value_reg := io.cur_input_keyval.value
} .elsewhen (key_reg >= io.cur_input_keyval.key) {
key_reg := io.input_prev.key
value_reg := io.input_prev.value
} .otherwise {
// do nothing
}
}
}
}
}
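// Summary (comment added for clarity; not part of the original source): on a DEQ command every
// stage copies the key/value held by the next (larger-key) stage, shifting the queue toward the
// head; on an ENQ command a stage either captures the incoming key/value (insert_here), copies
// from the previous stage when its own key is >= the inserted key, or otherwise holds its entry.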
object PriorityQueueStage {
def apply(keyWidth: Int, v: ValueInfo): PriorityQueueStage = new PriorityQueueStage(keyWidth, v)
}
// TODO
// - This design is not scalable as the enqueued key/value is broadcast to all the stages
// - Add pipeline registers later
class PriorityQueueIO(queSize: Int, keyWidth: Int, value: ValueInfo) extends Bundle {
val cnt_bits = log2Ceil(queSize+1)
val counter = Output(UInt(cnt_bits.W))
val enq = Flipped(Decoupled(KeyValue(keyWidth, value)))
val deq = Decoupled(KeyValue(keyWidth, value))
}
class PriorityQueue(queSize: Int, keyWidth: Int, value: ValueInfo) extends Module {
val keyWidthInternal = keyWidth + 1
val CMD_DEQ = 0.U
val CMD_ENQ = 1.U
val io = IO(new PriorityQueueIO(queSize, keyWidthInternal, value))
dontTouch(io)
val MAX_VALUE = ((1 << keyWidthInternal) - 1).U
val cnt_bits = log2Ceil(queSize+1)
  // do not consider cases where we are inserting more entries than the queSize
val counter = RegInit(0.U(cnt_bits.W))
io.counter := counter
val full = (counter === queSize.U)
val empty = (counter === 0.U)
io.deq.valid := !empty
io.enq.ready := !full
when (io.enq.fire) {
counter := counter + 1.U
}
when (io.deq.fire) {
counter := counter - 1.U
}
val cmd_valid = io.enq.valid || io.deq.ready
val cmd = Mux(io.enq.valid, CMD_ENQ, CMD_DEQ)
assert(!(io.enq.valid && io.deq.ready))
val stages = Seq.fill(queSize)(Module(new PriorityQueueStage(keyWidthInternal, value)))
for (i <- 0 until (queSize - 1)) {
stages(i+1).io.input_prev <> stages(i).io.output_nxt
stages(i).io.input_nxt <> stages(i+1).io.output_prev
}
stages(queSize-1).io.input_nxt.key := MAX_VALUE
// stages(queSize-1).io.input_nxt.value :=
stages(queSize-1).io.input_nxt.value.symbol := 0.U
// stages(queSize-1).io.input_nxt.value.child(0) := 0.U
// stages(queSize-1).io.input_nxt.value.child(1) := 0.U
stages(0).io.input_prev.key := io.enq.bits.key
stages(0).io.input_prev.value <> io.enq.bits.value
for (i <- 0 until queSize) {
stages(i).io.cmd.valid := cmd_valid
stages(i).io.cmd.bits := cmd
stages(i).io.cur_input_keyval <> io.enq.bits
}
val is_large_or_equal = WireInit(VecInit(Seq.fill(queSize)(false.B)))
for (i <- 0 until queSize) {
is_large_or_equal(i) := (stages(i).io.cur_output_keyval.key >= io.enq.bits.key)
}
val is_large_or_equal_cat = Wire(UInt(queSize.W))
is_large_or_equal_cat := Cat(is_large_or_equal.reverse)
val insert_here_idx = PriorityEncoder(is_large_or_equal_cat)
for (i <- 0 until queSize) {
when (i.U === insert_here_idx) {
stages(i).io.insert_here := true.B
} .otherwise {
stages(i).io.insert_here := false.B
}
}
io.deq.bits <> stages(0).io.output_prev
}
| module PriorityQueueStage_151( // @[ShiftRegisterPriorityQueue.scala:21:7]
input clock, // @[ShiftRegisterPriorityQueue.scala:21:7]
input reset, // @[ShiftRegisterPriorityQueue.scala:21:7]
output [30:0] io_output_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [9:0] io_output_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [30:0] io_output_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [9:0] io_output_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [30:0] io_input_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [9:0] io_input_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [30:0] io_input_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [9:0] io_input_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
input io_cmd_valid, // @[ShiftRegisterPriorityQueue.scala:22:14]
input io_cmd_bits, // @[ShiftRegisterPriorityQueue.scala:22:14]
input io_insert_here, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [30:0] io_cur_input_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [9:0] io_cur_input_keyval_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [30:0] io_cur_output_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [9:0] io_cur_output_keyval_value_symbol // @[ShiftRegisterPriorityQueue.scala:22:14]
);
wire [30:0] io_input_prev_key_0 = io_input_prev_key; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_input_prev_value_symbol_0 = io_input_prev_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_input_nxt_key_0 = io_input_nxt_key; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_input_nxt_value_symbol_0 = io_input_nxt_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire io_cmd_valid_0 = io_cmd_valid; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire io_cmd_bits_0 = io_cmd_bits; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire io_insert_here_0 = io_insert_here; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_cur_input_keyval_key_0 = io_cur_input_keyval_key; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_cur_input_keyval_value_symbol_0 = io_cur_input_keyval_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
reg [30:0] key_reg; // @[ShiftRegisterPriorityQueue.scala:30:24]
assign io_output_prev_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
assign io_output_nxt_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
assign io_cur_output_keyval_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
reg [9:0] value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:31:22]
assign io_output_prev_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
assign io_output_nxt_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
assign io_cur_output_keyval_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
wire _T_2 = key_reg >= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24, :52:30]
always @(posedge clock) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (reset) // @[ShiftRegisterPriorityQueue.scala:21:7]
key_reg <= 31'h7FFFFFFF; // @[ShiftRegisterPriorityQueue.scala:30:24]
else if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7]
key_reg <= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30]
key_reg <= io_input_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
end
else // @[ShiftRegisterPriorityQueue.scala:21:7]
key_reg <= io_input_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
end
if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7]
value_reg_symbol <= io_cur_input_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30]
value_reg_symbol <= io_input_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
end
else // @[ShiftRegisterPriorityQueue.scala:21:7]
value_reg_symbol <= io_input_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
end
  end // always @(posedge)
assign io_output_prev_key = io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_output_prev_value_symbol = io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_output_nxt_key = io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_output_nxt_value_symbol = io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_cur_output_keyval_key = io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_cur_output_keyval_value_symbol = io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File util.scala:
//******************************************************************************
// Copyright (c) 2015 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Utility Functions
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v4.util
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util.{Str}
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.tile.{TileKey}
import boom.v4.common.{MicroOp}
import boom.v4.exu.{BrUpdateInfo}
/**
 * Object to XOR fold an input register of fullLength into a compressedLength.
*/
object Fold
{
def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = {
val clen = compressedLength
val hlen = fullLength
if (hlen <= clen) {
input
} else {
var res = 0.U(clen.W)
var remaining = input.asUInt
for (i <- 0 to hlen-1 by clen) {
val len = if (i + clen > hlen ) (hlen - i) else clen
require(len > 0)
res = res(clen-1,0) ^ remaining(len-1,0)
remaining = remaining >> len.U
}
res
}
}
}
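// Illustrative Fold usage (added for clarity; not part of the original source):
//   Fold("hABCD".U(16.W), 8, 16)  // == "h66".U, i.e. "hAB".U ^ "hCD".U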
/**
* Object to check if MicroOp was killed due to a branch mispredict.
* Uses "Fast" branch masks
*/
object IsKilledByBranch
{
def apply(brupdate: BrUpdateInfo, flush: Bool, uop: MicroOp): Bool = {
return apply(brupdate, flush, uop.br_mask)
}
def apply(brupdate: BrUpdateInfo, flush: Bool, uop_mask: UInt): Bool = {
return maskMatch(brupdate.b1.mispredict_mask, uop_mask) || flush
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: T): Bool = {
return apply(brupdate, flush, bundle.uop)
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: Valid[T]): Bool = {
return apply(brupdate, flush, bundle.bits)
}
}
/**
* Object to return new MicroOp with a new BR mask given a MicroOp mask
* and old BR mask.
*/
object GetNewUopAndBrMask
{
def apply(uop: MicroOp, brupdate: BrUpdateInfo)
(implicit p: Parameters): MicroOp = {
val newuop = WireInit(uop)
newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask
newuop
}
}
/**
* Object to return a BR mask given a MicroOp mask and old BR mask.
*/
object GetNewBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = {
return uop.br_mask & ~brupdate.b1.resolve_mask
}
def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = {
return br_mask & ~brupdate.b1.resolve_mask
}
}
object UpdateBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = {
val out = WireInit(uop)
out.br_mask := GetNewBrMask(brupdate, uop)
out
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = {
val out = WireInit(bundle)
out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask)
out
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: Valid[T]): Valid[T] = {
val out = WireInit(bundle)
out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask)
out.valid := bundle.valid && !IsKilledByBranch(brupdate, flush, bundle.bits.uop.br_mask)
out
}
}
/**
* Object to check if at least 1 bit matches in two masks
*/
object maskMatch
{
def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U
}
/**
* Object to clear one bit in a mask given an index
*/
object clearMaskBit
{
def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0)
}
/**
* Object to shift a register over by one bit and concat a new one
*/
object PerformShiftRegister
{
def apply(reg_val: UInt, new_bit: Bool): UInt = {
reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt
reg_val
}
}
/**
* Object to shift a register over by one bit, wrapping the top bit around to the bottom
* (XOR'ed with a new-bit), and evicting a bit at index HLEN.
* This is used to simulate a longer HLEN-width shift register that is folded
* down to a compressed CLEN.
*/
object PerformCircularShiftRegister
{
def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = {
val carry = csr(clen-1)
val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U)
newval
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapAdd
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, amt: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + amt)(log2Ceil(n)-1,0)
} else {
val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt)
Mux(sum >= n.U,
sum - n.U,
sum)
}
}
}
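// Illustrative WrapAdd usage (added for clarity; not part of the original source):
//   WrapAdd(5.U, 4.U, 8)  // == 1.U (power-of-two n: the sum is simply truncated)
//   WrapAdd(5.U, 4.U, 7)  // == 2.U (non-power-of-two n: 9 wraps past 7 to 2)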
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapSub
{
// "n" is the number of increments, so we wrap to n-1.
def apply(value: UInt, amt: Int, n: Int): UInt = {
if (isPow2(n)) {
(value - amt.U)(log2Ceil(n)-1,0)
} else {
val v = Cat(0.U(1.W), value)
val b = Cat(0.U(1.W), amt.U)
Mux(value >= amt.U,
value - amt.U,
n.U - amt.U + value)
}
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapInc
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === (n-1).U)
Mux(wrap, 0.U, value + 1.U)
}
}
}
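// Illustrative WrapInc usage (added for clarity; not part of the original source):
//   WrapInc(6.U, 7)  // == 0.U (wraps back to zero after n-1)
//   WrapInc(3.U, 8)  // == 4.U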
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapDec
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value - 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === 0.U)
Mux(wrap, (n-1).U, value - 1.U)
}
}
}
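// Illustrative WrapDec usage (added for clarity; not part of the original source):
//   WrapDec(0.U, 7)  // == 6.U (wraps up to n-1)
//   WrapDec(0.U, 8)  // == 7.U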
/**
* Object to mask off lower bits of a PC to align to a "b"
* Byte boundary.
*/
object AlignPCToBoundary
{
def apply(pc: UInt, b: Int): UInt = {
// Invert for scenario where pc longer than b
// (which would clear all bits above size(b)).
~(~pc | (b-1).U)
}
}
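// Illustrative AlignPCToBoundary usage (added for clarity; not part of the original source):
//   AlignPCToBoundary("h1234".U, 64)  // == "h1200".U (low log2(64) = 6 bits cleared)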
/**
* Object to rotate a signal left by one
*/
object RotateL1
{
def apply(signal: UInt): UInt = {
val w = signal.getWidth
val out = Cat(signal(w-2,0), signal(w-1))
return out
}
}
/**
* Object to sext a value to a particular length.
*/
object Sext
{
def apply(x: UInt, length: Int): UInt = {
if (x.getWidth == length) return x
else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x)
}
}
/**
* Object to translate from BOOM's special "packed immediate" to a 32b signed immediate
* Asking for U-type gives it shifted up 12 bits.
*/
object ImmGen
{
import boom.v4.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U, IS_N}
def apply(i: UInt, isel: UInt): UInt = {
val ip = Mux(isel === IS_N, 0.U(LONGEST_IMM_SZ.W), i)
val sign = ip(LONGEST_IMM_SZ-1).asSInt
val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign)
val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign)
val i11 = Mux(isel === IS_U, 0.S,
Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign))
val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt)
val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt)
val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S)
return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0)
}
}
/**
* Object to see if an instruction is a JALR.
*/
object DebugIsJALR
{
def apply(inst: UInt): Bool = {
// TODO Chisel not sure why this won't compile
// val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)),
// Array(
// JALR -> Bool(true)))
inst(6,0) === "b1100111".U
}
}
/**
* Object to take an instruction and output its branch or jal target. Only used
 * for a debug assert (nowhere else would we jump straight from instruction
* bits to a target).
*/
object DebugGetBJImm
{
def apply(inst: UInt): UInt = {
// TODO Chisel not sure why this won't compile
//val csignals =
//rocket.DecodeLogic(inst,
// List(Bool(false), Bool(false)),
// Array(
// BEQ -> List(Bool(true ), Bool(false)),
// BNE -> List(Bool(true ), Bool(false)),
// BGE -> List(Bool(true ), Bool(false)),
// BGEU -> List(Bool(true ), Bool(false)),
// BLT -> List(Bool(true ), Bool(false)),
// BLTU -> List(Bool(true ), Bool(false))
// ))
//val is_br :: nothing :: Nil = csignals
val is_br = (inst(6,0) === "b1100011".U)
val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W))
val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W))
Mux(is_br, br_targ, jal_targ)
}
}
/**
* Object to return the lowest bit position after the head.
*/
object AgePriorityEncoder
{
def apply(in: Seq[Bool], head: UInt): UInt = {
val n = in.size
val width = log2Ceil(in.size)
val n_padded = 1 << width
val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in
val idx = PriorityEncoder(temp_vec)
idx(width-1, 0) //discard msb
}
}
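// Illustrative AgePriorityEncoder behavior (added for clarity; not part of the original source), with head = 2.U:
//   AgePriorityEncoder(Seq(true.B, false.B, false.B, true.B), 2.U)   // == 3.U (first valid at or after head)
//   AgePriorityEncoder(Seq(true.B, false.B, false.B, false.B), 2.U)  // == 0.U (nothing valid at/after head, so wrap around)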
/**
* Object to determine whether queue
* index i0 is older than index i1.
*/
object IsOlder
{
def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head))
}
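// Illustrative IsOlder usage (added for clarity; not part of the original source):
//   IsOlder(5.U, 1.U, 3.U)  // == true.B: with head at 3, entry 5 was allocated before entry 1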
object IsYoungerMask
{
def apply(i: UInt, head: UInt, n: Integer): UInt = {
val hi_mask = ~MaskLower(UIntToOH(i)(n-1,0))
val lo_mask = ~MaskUpper(UIntToOH(head)(n-1,0))
Mux(i < head, hi_mask & lo_mask, hi_mask | lo_mask)(n-1,0)
}
}
/**
* Set all bits at or below the highest order '1'.
*/
object MaskLower
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => in >> i.U).reduce(_|_)
}
}
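// Illustrative MaskLower usage (added for clarity; not part of the original source):
//   MaskLower("b0100".U(4.W))  // == "b0111".U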
/**
* Set all bits at or above the lowest order '1'.
*/
object MaskUpper
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_)
}
}
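// Illustrative MaskUpper usage (added for clarity; not part of the original source):
//   MaskUpper("b0100".U(4.W))  // == "b1100".U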
/**
* Transpose a matrix of Chisel Vecs.
*/
object Transpose
{
def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = {
val n = in(0).size
VecInit((0 until n).map(i => VecInit(in.map(row => row(i)))))
}
}
/**
* N-wide one-hot priority encoder.
*/
object SelectFirstN
{
def apply(in: UInt, n: Int) = {
val sels = Wire(Vec(n, UInt(in.getWidth.W)))
var mask = in
for (i <- 0 until n) {
sels(i) := PriorityEncoderOH(mask)
mask = mask & ~sels(i)
}
sels
}
}
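// SelectFirstN example (illustrative): SelectFirstN("b01101".U, 2) yields
//   sels(0) = "b00001".U and sels(1) = "b00100".U,
// one-hot selects for the two lowest set bits of the input.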
/**
* Connect the first k of n valid input interfaces to k output interfaces.
*/
class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module
{
require(n >= k)
val io = IO(new Bundle {
val in = Vec(n, Flipped(DecoupledIO(gen)))
val out = Vec(k, DecoupledIO(gen))
})
if (n == k) {
io.out <> io.in
} else {
val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c))
val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col =>
(col zip io.in.map(_.valid)) map {case (c,v) => c && v})
val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_))
val out_valids = sels map (col => col.reduce(_||_))
val out_data = sels map (s => Mux1H(s, io.in.map(_.bits)))
in_readys zip io.in foreach {case (r,i) => i.ready := r}
out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d}
}
}
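// Compactor behavior (illustrative): with n = 4 and k = 2, if only in(1) and in(3)
// are valid then out(0) is fed from in(1) and out(1) from in(3); if more than k
// inputs are valid, the extra inputs are back-pressured (their ready stays low).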
/**
* Create a queue that can be killed with a branch kill signal.
* Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq).
*/
class BranchKillableQueue[T <: boom.v4.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v4.common.MicroOp => Bool = u => true.B, fastDeq: Boolean = false)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v4.common.BoomModule()(p)
with boom.v4.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val enq = Flipped(Decoupled(gen))
val deq = Decoupled(gen)
val brupdate = Input(new BrUpdateInfo())
val flush = Input(Bool())
val empty = Output(Bool())
val count = Output(UInt(log2Ceil(entries).W))
})
if (fastDeq && entries > 1) {
// Pipeline dequeue selection so the mux gets an entire cycle
val main = Module(new BranchKillableQueue(gen, entries-1, flush_fn, false))
val out_reg = Reg(gen)
val out_valid = RegInit(false.B)
val out_uop = Reg(new MicroOp)
main.io.enq <> io.enq
main.io.brupdate := io.brupdate
main.io.flush := io.flush
io.empty := main.io.empty && !out_valid
io.count := main.io.count + out_valid
io.deq.valid := out_valid
io.deq.bits := out_reg
io.deq.bits.uop := out_uop
out_uop := UpdateBrMask(io.brupdate, out_uop)
out_valid := out_valid && !IsKilledByBranch(io.brupdate, false.B, out_uop) && !(io.flush && flush_fn(out_uop))
main.io.deq.ready := false.B
when (io.deq.fire || !out_valid) {
out_valid := main.io.deq.valid && !IsKilledByBranch(io.brupdate, false.B, main.io.deq.bits.uop) && !(io.flush && flush_fn(main.io.deq.bits.uop))
out_reg := main.io.deq.bits
out_uop := UpdateBrMask(io.brupdate, main.io.deq.bits.uop)
main.io.deq.ready := true.B
}
} else {
val ram = Mem(entries, gen)
val valids = RegInit(VecInit(Seq.fill(entries) {false.B}))
val uops = Reg(Vec(entries, new MicroOp))
val enq_ptr = Counter(entries)
val deq_ptr = Counter(entries)
val maybe_full = RegInit(false.B)
val ptr_match = enq_ptr.value === deq_ptr.value
io.empty := ptr_match && !maybe_full
val full = ptr_match && maybe_full
val do_enq = WireInit(io.enq.fire && !IsKilledByBranch(io.brupdate, false.B, io.enq.bits.uop) && !(io.flush && flush_fn(io.enq.bits.uop)))
val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty)
for (i <- 0 until entries) {
val mask = uops(i).br_mask
val uop = uops(i)
valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, false.B, mask) && !(io.flush && flush_fn(uop))
when (valids(i)) {
uops(i).br_mask := GetNewBrMask(io.brupdate, mask)
}
}
when (do_enq) {
ram(enq_ptr.value) := io.enq.bits
valids(enq_ptr.value) := true.B
uops(enq_ptr.value) := io.enq.bits.uop
uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
enq_ptr.inc()
}
when (do_deq) {
valids(deq_ptr.value) := false.B
deq_ptr.inc()
}
when (do_enq =/= do_deq) {
maybe_full := do_enq
}
io.enq.ready := !full
val out = Wire(gen)
out := ram(deq_ptr.value)
out.uop := uops(deq_ptr.value)
io.deq.valid := !io.empty && valids(deq_ptr.value)
io.deq.bits := out
val ptr_diff = enq_ptr.value - deq_ptr.value
if (isPow2(entries)) {
io.count := Cat(maybe_full && ptr_match, ptr_diff)
}
else {
io.count := Mux(ptr_match,
Mux(maybe_full,
entries.asUInt, 0.U),
Mux(deq_ptr.value > enq_ptr.value,
entries.asUInt + ptr_diff, ptr_diff))
}
}
}
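// Minimal usage sketch for BranchKillableQueue (hypothetical names: `MyBundle`
// must mix in HasBoomUOP, and `enq_req`/`deq_resp` stand in for real interfaces):
//   val q = Module(new BranchKillableQueue(new MyBundle, entries = 8,
//                                          flush_fn = u => u.uses_ldq))
//   q.io.enq <> enq_req
//   q.io.brupdate := io.brupdate
//   q.io.flush := io.flush
//   deq_resp <> q.io.deq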
// ------------------------------------------
// Printf helper functions
// ------------------------------------------
object BoolToChar
{
/**
* Take in a Chisel Bool and convert it into a Str
* based on the Chars given
*
* @param c_bool Chisel Bool
* @param trueChar Scala Char if bool is true
* @param falseChar Scala Char if bool is false
* @return UInt ASCII Char for "trueChar" or "falseChar"
*/
def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = {
Mux(c_bool, Str(trueChar), Str(falseChar))
}
}
object CfiTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param cfi_type specific cfi type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(cfi_type: UInt) = {
val strings = Seq("----", "BR ", "JAL ", "JALR")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(cfi_type)
}
}
object BpdTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param bpd_type specific bpd type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(bpd_type: UInt) = {
val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(bpd_type)
}
}
object RobTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param rob_type specific rob type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(rob_type: UInt) = {
val strings = Seq("RST", "NML", "RBK", " WT")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(rob_type)
}
}
object XRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param xreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(xreg: UInt) = {
val strings = Seq(" x0", " ra", " sp", " gp",
" tp", " t0", " t1", " t2",
" s0", " s1", " a0", " a1",
" a2", " a3", " a4", " a5",
" a6", " a7", " s2", " s3",
" s4", " s5", " s6", " s7",
" s8", " s9", "s10", "s11",
" t3", " t4", " t5", " t6")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(xreg)
}
}
object FPRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param fpreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(fpreg: UInt) = {
val strings = Seq(" ft0", " ft1", " ft2", " ft3",
" ft4", " ft5", " ft6", " ft7",
" fs0", " fs1", " fa0", " fa1",
" fa2", " fa3", " fa4", " fa5",
" fa6", " fa7", " fs2", " fs3",
" fs4", " fs5", " fs6", " fs7",
" fs8", " fs9", "fs10", "fs11",
" ft8", " ft9", "ft10", "ft11")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(fpreg)
}
}
object BoomCoreStringPrefix
{
/**
 * Add a prefix to BOOM strings (currently only adds the tileId)
*
* @param strs list of strings
* @return String combining the list with the prefix per line
*/
def apply(strs: String*)(implicit p: Parameters) = {
val prefix = "[C" + s"${p(TileKey).tileId}" + "] "
strs.map(str => prefix + str + "\n").mkString("")
}
}
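// BoomCoreStringPrefix example (illustrative): on a tile with tileId 0,
//   BoomCoreStringPrefix("fetch-width: 4", "decode-width: 2")
// produces "[C0] fetch-width: 4\n[C0] decode-width: 2\n".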
class BranchKillablePipeline[T <: boom.v4.common.HasBoomUOP](gen: T, stages: Int)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v4.common.BoomModule()(p)
with boom.v4.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val req = Input(Valid(gen))
val flush = Input(Bool())
val brupdate = Input(new BrUpdateInfo)
val resp = Output(Vec(stages, Valid(gen)))
})
require(stages > 0)
val uops = Reg(Vec(stages, Valid(gen)))
uops(0).valid := io.req.valid && !IsKilledByBranch(io.brupdate, io.flush, io.req.bits)
uops(0).bits := UpdateBrMask(io.brupdate, io.req.bits)
for (i <- 1 until stages) {
uops(i).valid := uops(i-1).valid && !IsKilledByBranch(io.brupdate, io.flush, uops(i-1).bits)
uops(i).bits := UpdateBrMask(io.brupdate, uops(i-1).bits)
}
for (i <- 0 until stages) { when (reset.asBool) { uops(i).valid := false.B } }
io.resp := uops
}
| module BranchKillableQueue_8( // @[util.scala:458:7]
input clock, // @[util.scala:458:7]
input reset, // @[util.scala:458:7]
output io_enq_ready, // @[util.scala:463:14]
input io_enq_valid, // @[util.scala:463:14]
input [31:0] io_enq_bits_uop_inst, // @[util.scala:463:14]
input [31:0] io_enq_bits_uop_debug_inst, // @[util.scala:463:14]
input io_enq_bits_uop_is_rvc, // @[util.scala:463:14]
input [33:0] io_enq_bits_uop_debug_pc, // @[util.scala:463:14]
input io_enq_bits_uop_iq_type_0, // @[util.scala:463:14]
input io_enq_bits_uop_iq_type_1, // @[util.scala:463:14]
input io_enq_bits_uop_iq_type_2, // @[util.scala:463:14]
input io_enq_bits_uop_iq_type_3, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_0, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_1, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_2, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_3, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_4, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_5, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_6, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_7, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_8, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_9, // @[util.scala:463:14]
input io_enq_bits_uop_iw_issued, // @[util.scala:463:14]
input io_enq_bits_uop_iw_issued_partial_agen, // @[util.scala:463:14]
input io_enq_bits_uop_iw_issued_partial_dgen, // @[util.scala:463:14]
input io_enq_bits_uop_iw_p1_speculative_child, // @[util.scala:463:14]
input io_enq_bits_uop_iw_p2_speculative_child, // @[util.scala:463:14]
input io_enq_bits_uop_iw_p1_bypass_hint, // @[util.scala:463:14]
input io_enq_bits_uop_iw_p2_bypass_hint, // @[util.scala:463:14]
input io_enq_bits_uop_iw_p3_bypass_hint, // @[util.scala:463:14]
input io_enq_bits_uop_dis_col_sel, // @[util.scala:463:14]
input [3:0] io_enq_bits_uop_br_mask, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_br_tag, // @[util.scala:463:14]
input [3:0] io_enq_bits_uop_br_type, // @[util.scala:463:14]
input io_enq_bits_uop_is_sfb, // @[util.scala:463:14]
input io_enq_bits_uop_is_fence, // @[util.scala:463:14]
input io_enq_bits_uop_is_fencei, // @[util.scala:463:14]
input io_enq_bits_uop_is_sfence, // @[util.scala:463:14]
input io_enq_bits_uop_is_amo, // @[util.scala:463:14]
input io_enq_bits_uop_is_eret, // @[util.scala:463:14]
input io_enq_bits_uop_is_sys_pc2epc, // @[util.scala:463:14]
input io_enq_bits_uop_is_rocc, // @[util.scala:463:14]
input io_enq_bits_uop_is_mov, // @[util.scala:463:14]
input [3:0] io_enq_bits_uop_ftq_idx, // @[util.scala:463:14]
input io_enq_bits_uop_edge_inst, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_pc_lob, // @[util.scala:463:14]
input io_enq_bits_uop_taken, // @[util.scala:463:14]
input io_enq_bits_uop_imm_rename, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_imm_sel, // @[util.scala:463:14]
input [4:0] io_enq_bits_uop_pimm, // @[util.scala:463:14]
input [19:0] io_enq_bits_uop_imm_packed, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_op1_sel, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_op2_sel, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_ldst, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_wen, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_ren1, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_ren2, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_ren3, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_swap12, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_swap23, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_fp_ctrl_typeTagIn, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_fp_ctrl_typeTagOut, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_fromint, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_toint, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_fastpipe, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_fma, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_div, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_sqrt, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_wflags, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_vec, // @[util.scala:463:14]
input [4:0] io_enq_bits_uop_rob_idx, // @[util.scala:463:14]
input [3:0] io_enq_bits_uop_ldq_idx, // @[util.scala:463:14]
input [3:0] io_enq_bits_uop_stq_idx, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_rxq_idx, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_pdst, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_prs1, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_prs2, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_prs3, // @[util.scala:463:14]
input [3:0] io_enq_bits_uop_ppred, // @[util.scala:463:14]
input io_enq_bits_uop_prs1_busy, // @[util.scala:463:14]
input io_enq_bits_uop_prs2_busy, // @[util.scala:463:14]
input io_enq_bits_uop_prs3_busy, // @[util.scala:463:14]
input io_enq_bits_uop_ppred_busy, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_stale_pdst, // @[util.scala:463:14]
input io_enq_bits_uop_exception, // @[util.scala:463:14]
input [63:0] io_enq_bits_uop_exc_cause, // @[util.scala:463:14]
input [4:0] io_enq_bits_uop_mem_cmd, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_mem_size, // @[util.scala:463:14]
input io_enq_bits_uop_mem_signed, // @[util.scala:463:14]
input io_enq_bits_uop_uses_ldq, // @[util.scala:463:14]
input io_enq_bits_uop_uses_stq, // @[util.scala:463:14]
input io_enq_bits_uop_is_unique, // @[util.scala:463:14]
input io_enq_bits_uop_flush_on_commit, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_csr_cmd, // @[util.scala:463:14]
input io_enq_bits_uop_ldst_is_rs1, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_ldst, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_lrs1, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_lrs2, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_lrs3, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_dst_rtype, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_lrs1_rtype, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_lrs2_rtype, // @[util.scala:463:14]
input io_enq_bits_uop_frs3_en, // @[util.scala:463:14]
input io_enq_bits_uop_fcn_dw, // @[util.scala:463:14]
input [4:0] io_enq_bits_uop_fcn_op, // @[util.scala:463:14]
input io_enq_bits_uop_fp_val, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_fp_rm, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_fp_typ, // @[util.scala:463:14]
input io_enq_bits_uop_xcpt_pf_if, // @[util.scala:463:14]
input io_enq_bits_uop_xcpt_ae_if, // @[util.scala:463:14]
input io_enq_bits_uop_xcpt_ma_if, // @[util.scala:463:14]
input io_enq_bits_uop_bp_debug_if, // @[util.scala:463:14]
input io_enq_bits_uop_bp_xcpt_if, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_debug_fsrc, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_debug_tsrc, // @[util.scala:463:14]
input [33:0] io_enq_bits_addr, // @[util.scala:463:14]
input [63:0] io_enq_bits_data, // @[util.scala:463:14]
input io_enq_bits_is_hella, // @[util.scala:463:14]
input io_enq_bits_tag_match, // @[util.scala:463:14]
input [1:0] io_enq_bits_old_meta_coh_state, // @[util.scala:463:14]
input [21:0] io_enq_bits_old_meta_tag, // @[util.scala:463:14]
input [1:0] io_enq_bits_way_en, // @[util.scala:463:14]
input [4:0] io_enq_bits_sdq_id, // @[util.scala:463:14]
input io_deq_ready, // @[util.scala:463:14]
output io_deq_valid, // @[util.scala:463:14]
output [31:0] io_deq_bits_uop_inst, // @[util.scala:463:14]
output [31:0] io_deq_bits_uop_debug_inst, // @[util.scala:463:14]
output io_deq_bits_uop_is_rvc, // @[util.scala:463:14]
output [33:0] io_deq_bits_uop_debug_pc, // @[util.scala:463:14]
output io_deq_bits_uop_iq_type_0, // @[util.scala:463:14]
output io_deq_bits_uop_iq_type_1, // @[util.scala:463:14]
output io_deq_bits_uop_iq_type_2, // @[util.scala:463:14]
output io_deq_bits_uop_iq_type_3, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_0, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_1, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_2, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_3, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_4, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_5, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_6, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_7, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_8, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_9, // @[util.scala:463:14]
output io_deq_bits_uop_iw_issued, // @[util.scala:463:14]
output io_deq_bits_uop_iw_issued_partial_agen, // @[util.scala:463:14]
output io_deq_bits_uop_iw_issued_partial_dgen, // @[util.scala:463:14]
output io_deq_bits_uop_iw_p1_speculative_child, // @[util.scala:463:14]
output io_deq_bits_uop_iw_p2_speculative_child, // @[util.scala:463:14]
output io_deq_bits_uop_iw_p1_bypass_hint, // @[util.scala:463:14]
output io_deq_bits_uop_iw_p2_bypass_hint, // @[util.scala:463:14]
output io_deq_bits_uop_iw_p3_bypass_hint, // @[util.scala:463:14]
output io_deq_bits_uop_dis_col_sel, // @[util.scala:463:14]
output [3:0] io_deq_bits_uop_br_mask, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_br_tag, // @[util.scala:463:14]
output [3:0] io_deq_bits_uop_br_type, // @[util.scala:463:14]
output io_deq_bits_uop_is_sfb, // @[util.scala:463:14]
output io_deq_bits_uop_is_fence, // @[util.scala:463:14]
output io_deq_bits_uop_is_fencei, // @[util.scala:463:14]
output io_deq_bits_uop_is_sfence, // @[util.scala:463:14]
output io_deq_bits_uop_is_amo, // @[util.scala:463:14]
output io_deq_bits_uop_is_eret, // @[util.scala:463:14]
output io_deq_bits_uop_is_sys_pc2epc, // @[util.scala:463:14]
output io_deq_bits_uop_is_rocc, // @[util.scala:463:14]
output io_deq_bits_uop_is_mov, // @[util.scala:463:14]
output [3:0] io_deq_bits_uop_ftq_idx, // @[util.scala:463:14]
output io_deq_bits_uop_edge_inst, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_pc_lob, // @[util.scala:463:14]
output io_deq_bits_uop_taken, // @[util.scala:463:14]
output io_deq_bits_uop_imm_rename, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_imm_sel, // @[util.scala:463:14]
output [4:0] io_deq_bits_uop_pimm, // @[util.scala:463:14]
output [19:0] io_deq_bits_uop_imm_packed, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_op1_sel, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_op2_sel, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_ldst, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_wen, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_ren1, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_ren2, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_ren3, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_swap12, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_swap23, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_fp_ctrl_typeTagIn, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_fp_ctrl_typeTagOut, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_fromint, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_toint, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_fastpipe, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_fma, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_div, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_sqrt, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_wflags, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_vec, // @[util.scala:463:14]
output [4:0] io_deq_bits_uop_rob_idx, // @[util.scala:463:14]
output [3:0] io_deq_bits_uop_ldq_idx, // @[util.scala:463:14]
output [3:0] io_deq_bits_uop_stq_idx, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_rxq_idx, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_pdst, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_prs1, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_prs2, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_prs3, // @[util.scala:463:14]
output [3:0] io_deq_bits_uop_ppred, // @[util.scala:463:14]
output io_deq_bits_uop_prs1_busy, // @[util.scala:463:14]
output io_deq_bits_uop_prs2_busy, // @[util.scala:463:14]
output io_deq_bits_uop_prs3_busy, // @[util.scala:463:14]
output io_deq_bits_uop_ppred_busy, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_stale_pdst, // @[util.scala:463:14]
output io_deq_bits_uop_exception, // @[util.scala:463:14]
output [63:0] io_deq_bits_uop_exc_cause, // @[util.scala:463:14]
output [4:0] io_deq_bits_uop_mem_cmd, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_mem_size, // @[util.scala:463:14]
output io_deq_bits_uop_mem_signed, // @[util.scala:463:14]
output io_deq_bits_uop_uses_ldq, // @[util.scala:463:14]
output io_deq_bits_uop_uses_stq, // @[util.scala:463:14]
output io_deq_bits_uop_is_unique, // @[util.scala:463:14]
output io_deq_bits_uop_flush_on_commit, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_csr_cmd, // @[util.scala:463:14]
output io_deq_bits_uop_ldst_is_rs1, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_ldst, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_lrs1, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_lrs2, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_lrs3, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_dst_rtype, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_lrs1_rtype, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_lrs2_rtype, // @[util.scala:463:14]
output io_deq_bits_uop_frs3_en, // @[util.scala:463:14]
output io_deq_bits_uop_fcn_dw, // @[util.scala:463:14]
output [4:0] io_deq_bits_uop_fcn_op, // @[util.scala:463:14]
output io_deq_bits_uop_fp_val, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_fp_rm, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_fp_typ, // @[util.scala:463:14]
output io_deq_bits_uop_xcpt_pf_if, // @[util.scala:463:14]
output io_deq_bits_uop_xcpt_ae_if, // @[util.scala:463:14]
output io_deq_bits_uop_xcpt_ma_if, // @[util.scala:463:14]
output io_deq_bits_uop_bp_debug_if, // @[util.scala:463:14]
output io_deq_bits_uop_bp_xcpt_if, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_debug_fsrc, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_debug_tsrc, // @[util.scala:463:14]
output [33:0] io_deq_bits_addr, // @[util.scala:463:14]
output [63:0] io_deq_bits_data, // @[util.scala:463:14]
output io_deq_bits_is_hella, // @[util.scala:463:14]
output io_deq_bits_tag_match, // @[util.scala:463:14]
output [1:0] io_deq_bits_old_meta_coh_state, // @[util.scala:463:14]
output [21:0] io_deq_bits_old_meta_tag, // @[util.scala:463:14]
output [1:0] io_deq_bits_way_en, // @[util.scala:463:14]
output [4:0] io_deq_bits_sdq_id, // @[util.scala:463:14]
output io_empty, // @[util.scala:463:14]
output [3:0] io_count // @[util.scala:463:14]
);
wire [130:0] _ram_ext_R0_data; // @[util.scala:503:22]
wire io_enq_valid_0 = io_enq_valid; // @[util.scala:458:7]
wire [31:0] io_enq_bits_uop_inst_0 = io_enq_bits_uop_inst; // @[util.scala:458:7]
wire [31:0] io_enq_bits_uop_debug_inst_0 = io_enq_bits_uop_debug_inst; // @[util.scala:458:7]
wire io_enq_bits_uop_is_rvc_0 = io_enq_bits_uop_is_rvc; // @[util.scala:458:7]
wire [33:0] io_enq_bits_uop_debug_pc_0 = io_enq_bits_uop_debug_pc; // @[util.scala:458:7]
wire io_enq_bits_uop_iq_type_0_0 = io_enq_bits_uop_iq_type_0; // @[util.scala:458:7]
wire io_enq_bits_uop_iq_type_1_0 = io_enq_bits_uop_iq_type_1; // @[util.scala:458:7]
wire io_enq_bits_uop_iq_type_2_0 = io_enq_bits_uop_iq_type_2; // @[util.scala:458:7]
wire io_enq_bits_uop_iq_type_3_0 = io_enq_bits_uop_iq_type_3; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_0_0 = io_enq_bits_uop_fu_code_0; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_1_0 = io_enq_bits_uop_fu_code_1; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_2_0 = io_enq_bits_uop_fu_code_2; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_3_0 = io_enq_bits_uop_fu_code_3; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_4_0 = io_enq_bits_uop_fu_code_4; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_5_0 = io_enq_bits_uop_fu_code_5; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_6_0 = io_enq_bits_uop_fu_code_6; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_7_0 = io_enq_bits_uop_fu_code_7; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_8_0 = io_enq_bits_uop_fu_code_8; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_9_0 = io_enq_bits_uop_fu_code_9; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_issued_0 = io_enq_bits_uop_iw_issued; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_issued_partial_agen_0 = io_enq_bits_uop_iw_issued_partial_agen; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_issued_partial_dgen_0 = io_enq_bits_uop_iw_issued_partial_dgen; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_p1_speculative_child_0 = io_enq_bits_uop_iw_p1_speculative_child; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_p2_speculative_child_0 = io_enq_bits_uop_iw_p2_speculative_child; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_p1_bypass_hint_0 = io_enq_bits_uop_iw_p1_bypass_hint; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_p2_bypass_hint_0 = io_enq_bits_uop_iw_p2_bypass_hint; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_p3_bypass_hint_0 = io_enq_bits_uop_iw_p3_bypass_hint; // @[util.scala:458:7]
wire io_enq_bits_uop_dis_col_sel_0 = io_enq_bits_uop_dis_col_sel; // @[util.scala:458:7]
wire [3:0] io_enq_bits_uop_br_mask_0 = io_enq_bits_uop_br_mask; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_br_tag_0 = io_enq_bits_uop_br_tag; // @[util.scala:458:7]
wire [3:0] io_enq_bits_uop_br_type_0 = io_enq_bits_uop_br_type; // @[util.scala:458:7]
wire io_enq_bits_uop_is_sfb_0 = io_enq_bits_uop_is_sfb; // @[util.scala:458:7]
wire io_enq_bits_uop_is_fence_0 = io_enq_bits_uop_is_fence; // @[util.scala:458:7]
wire io_enq_bits_uop_is_fencei_0 = io_enq_bits_uop_is_fencei; // @[util.scala:458:7]
wire io_enq_bits_uop_is_sfence_0 = io_enq_bits_uop_is_sfence; // @[util.scala:458:7]
wire io_enq_bits_uop_is_amo_0 = io_enq_bits_uop_is_amo; // @[util.scala:458:7]
wire io_enq_bits_uop_is_eret_0 = io_enq_bits_uop_is_eret; // @[util.scala:458:7]
wire io_enq_bits_uop_is_sys_pc2epc_0 = io_enq_bits_uop_is_sys_pc2epc; // @[util.scala:458:7]
wire io_enq_bits_uop_is_rocc_0 = io_enq_bits_uop_is_rocc; // @[util.scala:458:7]
wire io_enq_bits_uop_is_mov_0 = io_enq_bits_uop_is_mov; // @[util.scala:458:7]
wire [3:0] io_enq_bits_uop_ftq_idx_0 = io_enq_bits_uop_ftq_idx; // @[util.scala:458:7]
wire io_enq_bits_uop_edge_inst_0 = io_enq_bits_uop_edge_inst; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_pc_lob_0 = io_enq_bits_uop_pc_lob; // @[util.scala:458:7]
wire io_enq_bits_uop_taken_0 = io_enq_bits_uop_taken; // @[util.scala:458:7]
wire io_enq_bits_uop_imm_rename_0 = io_enq_bits_uop_imm_rename; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_imm_sel_0 = io_enq_bits_uop_imm_sel; // @[util.scala:458:7]
wire [4:0] io_enq_bits_uop_pimm_0 = io_enq_bits_uop_pimm; // @[util.scala:458:7]
wire [19:0] io_enq_bits_uop_imm_packed_0 = io_enq_bits_uop_imm_packed; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_op1_sel_0 = io_enq_bits_uop_op1_sel; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_op2_sel_0 = io_enq_bits_uop_op2_sel; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_ldst_0 = io_enq_bits_uop_fp_ctrl_ldst; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_wen_0 = io_enq_bits_uop_fp_ctrl_wen; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_ren1_0 = io_enq_bits_uop_fp_ctrl_ren1; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_ren2_0 = io_enq_bits_uop_fp_ctrl_ren2; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_ren3_0 = io_enq_bits_uop_fp_ctrl_ren3; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_swap12_0 = io_enq_bits_uop_fp_ctrl_swap12; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_swap23_0 = io_enq_bits_uop_fp_ctrl_swap23; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_fp_ctrl_typeTagIn_0 = io_enq_bits_uop_fp_ctrl_typeTagIn; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_fp_ctrl_typeTagOut_0 = io_enq_bits_uop_fp_ctrl_typeTagOut; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_fromint_0 = io_enq_bits_uop_fp_ctrl_fromint; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_toint_0 = io_enq_bits_uop_fp_ctrl_toint; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_fastpipe_0 = io_enq_bits_uop_fp_ctrl_fastpipe; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_fma_0 = io_enq_bits_uop_fp_ctrl_fma; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_div_0 = io_enq_bits_uop_fp_ctrl_div; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_sqrt_0 = io_enq_bits_uop_fp_ctrl_sqrt; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_wflags_0 = io_enq_bits_uop_fp_ctrl_wflags; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_vec_0 = io_enq_bits_uop_fp_ctrl_vec; // @[util.scala:458:7]
wire [4:0] io_enq_bits_uop_rob_idx_0 = io_enq_bits_uop_rob_idx; // @[util.scala:458:7]
wire [3:0] io_enq_bits_uop_ldq_idx_0 = io_enq_bits_uop_ldq_idx; // @[util.scala:458:7]
wire [3:0] io_enq_bits_uop_stq_idx_0 = io_enq_bits_uop_stq_idx; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_rxq_idx_0 = io_enq_bits_uop_rxq_idx; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_pdst_0 = io_enq_bits_uop_pdst; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_prs1_0 = io_enq_bits_uop_prs1; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_prs2_0 = io_enq_bits_uop_prs2; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_prs3_0 = io_enq_bits_uop_prs3; // @[util.scala:458:7]
wire [3:0] io_enq_bits_uop_ppred_0 = io_enq_bits_uop_ppred; // @[util.scala:458:7]
wire io_enq_bits_uop_prs1_busy_0 = io_enq_bits_uop_prs1_busy; // @[util.scala:458:7]
wire io_enq_bits_uop_prs2_busy_0 = io_enq_bits_uop_prs2_busy; // @[util.scala:458:7]
wire io_enq_bits_uop_prs3_busy_0 = io_enq_bits_uop_prs3_busy; // @[util.scala:458:7]
wire io_enq_bits_uop_ppred_busy_0 = io_enq_bits_uop_ppred_busy; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_stale_pdst_0 = io_enq_bits_uop_stale_pdst; // @[util.scala:458:7]
wire io_enq_bits_uop_exception_0 = io_enq_bits_uop_exception; // @[util.scala:458:7]
wire [63:0] io_enq_bits_uop_exc_cause_0 = io_enq_bits_uop_exc_cause; // @[util.scala:458:7]
wire [4:0] io_enq_bits_uop_mem_cmd_0 = io_enq_bits_uop_mem_cmd; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_mem_size_0 = io_enq_bits_uop_mem_size; // @[util.scala:458:7]
wire io_enq_bits_uop_mem_signed_0 = io_enq_bits_uop_mem_signed; // @[util.scala:458:7]
wire io_enq_bits_uop_uses_ldq_0 = io_enq_bits_uop_uses_ldq; // @[util.scala:458:7]
wire io_enq_bits_uop_uses_stq_0 = io_enq_bits_uop_uses_stq; // @[util.scala:458:7]
wire io_enq_bits_uop_is_unique_0 = io_enq_bits_uop_is_unique; // @[util.scala:458:7]
wire io_enq_bits_uop_flush_on_commit_0 = io_enq_bits_uop_flush_on_commit; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_csr_cmd_0 = io_enq_bits_uop_csr_cmd; // @[util.scala:458:7]
wire io_enq_bits_uop_ldst_is_rs1_0 = io_enq_bits_uop_ldst_is_rs1; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_ldst_0 = io_enq_bits_uop_ldst; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_lrs1_0 = io_enq_bits_uop_lrs1; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_lrs2_0 = io_enq_bits_uop_lrs2; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_lrs3_0 = io_enq_bits_uop_lrs3; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_dst_rtype_0 = io_enq_bits_uop_dst_rtype; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_lrs1_rtype_0 = io_enq_bits_uop_lrs1_rtype; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_lrs2_rtype_0 = io_enq_bits_uop_lrs2_rtype; // @[util.scala:458:7]
wire io_enq_bits_uop_frs3_en_0 = io_enq_bits_uop_frs3_en; // @[util.scala:458:7]
wire io_enq_bits_uop_fcn_dw_0 = io_enq_bits_uop_fcn_dw; // @[util.scala:458:7]
wire [4:0] io_enq_bits_uop_fcn_op_0 = io_enq_bits_uop_fcn_op; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_val_0 = io_enq_bits_uop_fp_val; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_fp_rm_0 = io_enq_bits_uop_fp_rm; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_fp_typ_0 = io_enq_bits_uop_fp_typ; // @[util.scala:458:7]
wire io_enq_bits_uop_xcpt_pf_if_0 = io_enq_bits_uop_xcpt_pf_if; // @[util.scala:458:7]
wire io_enq_bits_uop_xcpt_ae_if_0 = io_enq_bits_uop_xcpt_ae_if; // @[util.scala:458:7]
wire io_enq_bits_uop_xcpt_ma_if_0 = io_enq_bits_uop_xcpt_ma_if; // @[util.scala:458:7]
wire io_enq_bits_uop_bp_debug_if_0 = io_enq_bits_uop_bp_debug_if; // @[util.scala:458:7]
wire io_enq_bits_uop_bp_xcpt_if_0 = io_enq_bits_uop_bp_xcpt_if; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_debug_fsrc_0 = io_enq_bits_uop_debug_fsrc; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_debug_tsrc_0 = io_enq_bits_uop_debug_tsrc; // @[util.scala:458:7]
wire [33:0] io_enq_bits_addr_0 = io_enq_bits_addr; // @[util.scala:458:7]
wire [63:0] io_enq_bits_data_0 = io_enq_bits_data; // @[util.scala:458:7]
wire io_enq_bits_is_hella_0 = io_enq_bits_is_hella; // @[util.scala:458:7]
wire io_enq_bits_tag_match_0 = io_enq_bits_tag_match; // @[util.scala:458:7]
wire [1:0] io_enq_bits_old_meta_coh_state_0 = io_enq_bits_old_meta_coh_state; // @[util.scala:458:7]
wire [21:0] io_enq_bits_old_meta_tag_0 = io_enq_bits_old_meta_tag; // @[util.scala:458:7]
wire [1:0] io_enq_bits_way_en_0 = io_enq_bits_way_en; // @[util.scala:458:7]
wire [4:0] io_enq_bits_sdq_id_0 = io_enq_bits_sdq_id; // @[util.scala:458:7]
wire io_deq_ready_0 = io_deq_ready; // @[util.scala:458:7]
wire _do_enq_T_4 = 1'h1; // @[util.scala:514:42]
wire _do_enq_T_7 = 1'h1; // @[util.scala:514:102]
wire _valids_0_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_0_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_1_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_1_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_2_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_2_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_3_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_3_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_4_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_4_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_5_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_5_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_6_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_6_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_7_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_7_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_8_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_8_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_9_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_9_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_10_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_10_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_11_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_11_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_12_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_12_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_13_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_13_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_14_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_14_T_6 = 1'h1; // @[util.scala:520:83]
wire [3:0] _uops_0_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_1_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_2_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_3_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_4_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_5_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_6_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_7_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_8_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_9_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_10_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_11_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_12_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_13_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_14_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_br_mask_T = 4'hF; // @[util.scala:93:27]
wire [20:0] io_brupdate_b2_target_offset = 21'h0; // @[util.scala:458:7, :463:14]
wire [63:0] io_brupdate_b2_uop_exc_cause = 64'h0; // @[util.scala:458:7, :463:14]
wire [19:0] io_brupdate_b2_uop_imm_packed = 20'h0; // @[util.scala:458:7, :463:14]
wire [4:0] io_brupdate_b2_uop_pimm = 5'h0; // @[util.scala:458:7, :463:14]
wire [4:0] io_brupdate_b2_uop_rob_idx = 5'h0; // @[util.scala:458:7, :463:14]
wire [4:0] io_brupdate_b2_uop_mem_cmd = 5'h0; // @[util.scala:458:7, :463:14]
wire [4:0] io_brupdate_b2_uop_fcn_op = 5'h0; // @[util.scala:458:7, :463:14]
wire [2:0] io_brupdate_b2_uop_imm_sel = 3'h0; // @[util.scala:458:7, :463:14]
wire [2:0] io_brupdate_b2_uop_op2_sel = 3'h0; // @[util.scala:458:7, :463:14]
wire [2:0] io_brupdate_b2_uop_csr_cmd = 3'h0; // @[util.scala:458:7, :463:14]
wire [2:0] io_brupdate_b2_uop_fp_rm = 3'h0; // @[util.scala:458:7, :463:14]
wire [2:0] io_brupdate_b2_uop_debug_fsrc = 3'h0; // @[util.scala:458:7, :463:14]
wire [2:0] io_brupdate_b2_uop_debug_tsrc = 3'h0; // @[util.scala:458:7, :463:14]
wire [2:0] io_brupdate_b2_cfi_type = 3'h0; // @[util.scala:458:7, :463:14]
wire [5:0] io_brupdate_b2_uop_pc_lob = 6'h0; // @[util.scala:458:7, :463:14]
wire [5:0] io_brupdate_b2_uop_pdst = 6'h0; // @[util.scala:458:7, :463:14]
wire [5:0] io_brupdate_b2_uop_prs1 = 6'h0; // @[util.scala:458:7, :463:14]
wire [5:0] io_brupdate_b2_uop_prs2 = 6'h0; // @[util.scala:458:7, :463:14]
wire [5:0] io_brupdate_b2_uop_prs3 = 6'h0; // @[util.scala:458:7, :463:14]
wire [5:0] io_brupdate_b2_uop_stale_pdst = 6'h0; // @[util.scala:458:7, :463:14]
wire [5:0] io_brupdate_b2_uop_ldst = 6'h0; // @[util.scala:458:7, :463:14]
wire [5:0] io_brupdate_b2_uop_lrs1 = 6'h0; // @[util.scala:458:7, :463:14]
wire [5:0] io_brupdate_b2_uop_lrs2 = 6'h0; // @[util.scala:458:7, :463:14]
wire [5:0] io_brupdate_b2_uop_lrs3 = 6'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_uop_br_tag = 2'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_uop_op1_sel = 2'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagIn = 2'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagOut = 2'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_uop_rxq_idx = 2'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_uop_mem_size = 2'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_uop_dst_rtype = 2'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_uop_lrs1_rtype = 2'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_uop_lrs2_rtype = 2'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_uop_fp_typ = 2'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_pc_sel = 2'h0; // @[util.scala:458:7, :463:14]
wire [33:0] io_brupdate_b2_uop_debug_pc = 34'h0; // @[util.scala:458:7, :463:14]
wire [33:0] io_brupdate_b2_jalr_target = 34'h0; // @[util.scala:458:7, :463:14]
wire io_brupdate_b2_uop_is_rvc = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iq_type_0 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iq_type_1 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iq_type_2 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iq_type_3 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_0 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_1 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_2 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_3 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_4 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_5 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_6 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_7 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_8 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_9 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_issued = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_issued_partial_agen = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_issued_partial_dgen = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_p1_speculative_child = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_p2_speculative_child = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_p1_bypass_hint = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_p2_bypass_hint = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_p3_bypass_hint = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_dis_col_sel = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_sfb = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_fence = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_fencei = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_sfence = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_amo = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_eret = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_sys_pc2epc = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_rocc = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_mov = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_edge_inst = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_taken = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_imm_rename = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_ldst = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_wen = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_ren1 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_ren2 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_ren3 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_swap12 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_swap23 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_fromint = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_toint = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_fastpipe = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_fma = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_div = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_sqrt = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_wflags = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_vec = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_prs1_busy = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_prs2_busy = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_prs3_busy = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_ppred_busy = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_exception = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_mem_signed = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_uses_ldq = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_uses_stq = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_unique = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_flush_on_commit = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_ldst_is_rs1 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_frs3_en = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fcn_dw = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_val = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_xcpt_pf_if = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_xcpt_ae_if = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_xcpt_ma_if = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_bp_debug_if = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_bp_xcpt_if = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_mispredict = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_taken = 1'h0; // @[util.scala:458:7]
wire io_flush = 1'h0; // @[util.scala:458:7]
wire _valids_WIRE_0 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_1 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_2 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_3 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_4 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_5 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_6 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_7 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_8 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_9 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_10 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_11 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_12 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_13 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_14 = 1'h0; // @[util.scala:504:34]
wire _do_enq_T_2 = 1'h0; // @[util.scala:126:59]
wire _do_enq_T_3 = 1'h0; // @[util.scala:61:61]
wire _do_enq_T_6 = 1'h0; // @[util.scala:514:113]
wire _valids_0_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_0_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_0_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_1_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_1_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_1_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_2_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_2_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_2_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_3_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_3_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_3_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_4_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_4_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_4_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_5_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_5_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_5_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_6_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_6_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_6_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_7_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_7_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_7_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_8_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_8_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_8_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_9_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_9_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_9_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_10_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_10_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_10_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_11_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_11_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_11_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_12_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_12_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_12_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_13_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_13_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_13_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_14_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_14_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_14_T_5 = 1'h0; // @[util.scala:520:94]
wire [31:0] io_brupdate_b2_uop_inst = 32'h0; // @[util.scala:458:7, :463:14]
wire [31:0] io_brupdate_b2_uop_debug_inst = 32'h0; // @[util.scala:458:7, :463:14]
wire [3:0] io_brupdate_b1_resolve_mask = 4'h0; // @[util.scala:458:7]
wire [3:0] io_brupdate_b1_mispredict_mask = 4'h0; // @[util.scala:458:7]
wire [3:0] io_brupdate_b2_uop_br_mask = 4'h0; // @[util.scala:458:7]
wire [3:0] io_brupdate_b2_uop_br_type = 4'h0; // @[util.scala:458:7]
wire [3:0] io_brupdate_b2_uop_ftq_idx = 4'h0; // @[util.scala:458:7]
wire [3:0] io_brupdate_b2_uop_ldq_idx = 4'h0; // @[util.scala:458:7]
wire [3:0] io_brupdate_b2_uop_stq_idx = 4'h0; // @[util.scala:458:7]
wire [3:0] io_brupdate_b2_uop_ppred = 4'h0; // @[util.scala:458:7]
wire [3:0] _do_enq_T_1 = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_0_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_1_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_2_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_3_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_4_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_5_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_6_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_7_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_8_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_9_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_10_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_11_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_12_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_13_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_14_T = 4'h0; // @[util.scala:126:51]
wire _io_enq_ready_T; // @[util.scala:543:21]
wire [3:0] _uops_br_mask_T_1 = io_enq_bits_uop_br_mask_0; // @[util.scala:93:25, :458:7]
wire _io_deq_valid_T_1; // @[util.scala:548:42]
wire [31:0] out_uop_inst; // @[util.scala:545:19]
wire [31:0] out_uop_debug_inst; // @[util.scala:545:19]
wire out_uop_is_rvc; // @[util.scala:545:19]
wire [33:0] out_uop_debug_pc; // @[util.scala:545:19]
wire out_uop_iq_type_0; // @[util.scala:545:19]
wire out_uop_iq_type_1; // @[util.scala:545:19]
wire out_uop_iq_type_2; // @[util.scala:545:19]
wire out_uop_iq_type_3; // @[util.scala:545:19]
wire out_uop_fu_code_0; // @[util.scala:545:19]
wire out_uop_fu_code_1; // @[util.scala:545:19]
wire out_uop_fu_code_2; // @[util.scala:545:19]
wire out_uop_fu_code_3; // @[util.scala:545:19]
wire out_uop_fu_code_4; // @[util.scala:545:19]
wire out_uop_fu_code_5; // @[util.scala:545:19]
wire out_uop_fu_code_6; // @[util.scala:545:19]
wire out_uop_fu_code_7; // @[util.scala:545:19]
wire out_uop_fu_code_8; // @[util.scala:545:19]
wire out_uop_fu_code_9; // @[util.scala:545:19]
wire out_uop_iw_issued; // @[util.scala:545:19]
wire out_uop_iw_issued_partial_agen; // @[util.scala:545:19]
wire out_uop_iw_issued_partial_dgen; // @[util.scala:545:19]
wire out_uop_iw_p1_speculative_child; // @[util.scala:545:19]
wire out_uop_iw_p2_speculative_child; // @[util.scala:545:19]
wire out_uop_iw_p1_bypass_hint; // @[util.scala:545:19]
wire out_uop_iw_p2_bypass_hint; // @[util.scala:545:19]
wire out_uop_iw_p3_bypass_hint; // @[util.scala:545:19]
wire out_uop_dis_col_sel; // @[util.scala:545:19]
wire [3:0] out_uop_br_mask; // @[util.scala:545:19]
wire [1:0] out_uop_br_tag; // @[util.scala:545:19]
wire [3:0] out_uop_br_type; // @[util.scala:545:19]
wire out_uop_is_sfb; // @[util.scala:545:19]
wire out_uop_is_fence; // @[util.scala:545:19]
wire out_uop_is_fencei; // @[util.scala:545:19]
wire out_uop_is_sfence; // @[util.scala:545:19]
wire out_uop_is_amo; // @[util.scala:545:19]
wire out_uop_is_eret; // @[util.scala:545:19]
wire out_uop_is_sys_pc2epc; // @[util.scala:545:19]
wire out_uop_is_rocc; // @[util.scala:545:19]
wire out_uop_is_mov; // @[util.scala:545:19]
wire [3:0] out_uop_ftq_idx; // @[util.scala:545:19]
wire out_uop_edge_inst; // @[util.scala:545:19]
wire [5:0] out_uop_pc_lob; // @[util.scala:545:19]
wire out_uop_taken; // @[util.scala:545:19]
wire out_uop_imm_rename; // @[util.scala:545:19]
wire [2:0] out_uop_imm_sel; // @[util.scala:545:19]
wire [4:0] out_uop_pimm; // @[util.scala:545:19]
wire [19:0] out_uop_imm_packed; // @[util.scala:545:19]
wire [1:0] out_uop_op1_sel; // @[util.scala:545:19]
wire [2:0] out_uop_op2_sel; // @[util.scala:545:19]
wire out_uop_fp_ctrl_ldst; // @[util.scala:545:19]
wire out_uop_fp_ctrl_wen; // @[util.scala:545:19]
wire out_uop_fp_ctrl_ren1; // @[util.scala:545:19]
wire out_uop_fp_ctrl_ren2; // @[util.scala:545:19]
wire out_uop_fp_ctrl_ren3; // @[util.scala:545:19]
wire out_uop_fp_ctrl_swap12; // @[util.scala:545:19]
wire out_uop_fp_ctrl_swap23; // @[util.scala:545:19]
wire [1:0] out_uop_fp_ctrl_typeTagIn; // @[util.scala:545:19]
wire [1:0] out_uop_fp_ctrl_typeTagOut; // @[util.scala:545:19]
wire out_uop_fp_ctrl_fromint; // @[util.scala:545:19]
wire out_uop_fp_ctrl_toint; // @[util.scala:545:19]
wire out_uop_fp_ctrl_fastpipe; // @[util.scala:545:19]
wire out_uop_fp_ctrl_fma; // @[util.scala:545:19]
wire out_uop_fp_ctrl_div; // @[util.scala:545:19]
wire out_uop_fp_ctrl_sqrt; // @[util.scala:545:19]
wire out_uop_fp_ctrl_wflags; // @[util.scala:545:19]
wire out_uop_fp_ctrl_vec; // @[util.scala:545:19]
wire [4:0] out_uop_rob_idx; // @[util.scala:545:19]
wire [3:0] out_uop_ldq_idx; // @[util.scala:545:19]
wire [3:0] out_uop_stq_idx; // @[util.scala:545:19]
wire [1:0] out_uop_rxq_idx; // @[util.scala:545:19]
wire [5:0] out_uop_pdst; // @[util.scala:545:19]
wire [5:0] out_uop_prs1; // @[util.scala:545:19]
wire [5:0] out_uop_prs2; // @[util.scala:545:19]
wire [5:0] out_uop_prs3; // @[util.scala:545:19]
wire [3:0] out_uop_ppred; // @[util.scala:545:19]
wire out_uop_prs1_busy; // @[util.scala:545:19]
wire out_uop_prs2_busy; // @[util.scala:545:19]
wire out_uop_prs3_busy; // @[util.scala:545:19]
wire out_uop_ppred_busy; // @[util.scala:545:19]
wire [5:0] out_uop_stale_pdst; // @[util.scala:545:19]
wire out_uop_exception; // @[util.scala:545:19]
wire [63:0] out_uop_exc_cause; // @[util.scala:545:19]
wire [4:0] out_uop_mem_cmd; // @[util.scala:545:19]
wire [1:0] out_uop_mem_size; // @[util.scala:545:19]
wire out_uop_mem_signed; // @[util.scala:545:19]
wire out_uop_uses_ldq; // @[util.scala:545:19]
wire out_uop_uses_stq; // @[util.scala:545:19]
wire out_uop_is_unique; // @[util.scala:545:19]
wire out_uop_flush_on_commit; // @[util.scala:545:19]
wire [2:0] out_uop_csr_cmd; // @[util.scala:545:19]
wire out_uop_ldst_is_rs1; // @[util.scala:545:19]
wire [5:0] out_uop_ldst; // @[util.scala:545:19]
wire [5:0] out_uop_lrs1; // @[util.scala:545:19]
wire [5:0] out_uop_lrs2; // @[util.scala:545:19]
wire [5:0] out_uop_lrs3; // @[util.scala:545:19]
wire [1:0] out_uop_dst_rtype; // @[util.scala:545:19]
wire [1:0] out_uop_lrs1_rtype; // @[util.scala:545:19]
wire [1:0] out_uop_lrs2_rtype; // @[util.scala:545:19]
wire out_uop_frs3_en; // @[util.scala:545:19]
wire out_uop_fcn_dw; // @[util.scala:545:19]
wire [4:0] out_uop_fcn_op; // @[util.scala:545:19]
wire out_uop_fp_val; // @[util.scala:545:19]
wire [2:0] out_uop_fp_rm; // @[util.scala:545:19]
wire [1:0] out_uop_fp_typ; // @[util.scala:545:19]
wire out_uop_xcpt_pf_if; // @[util.scala:545:19]
wire out_uop_xcpt_ae_if; // @[util.scala:545:19]
wire out_uop_xcpt_ma_if; // @[util.scala:545:19]
wire out_uop_bp_debug_if; // @[util.scala:545:19]
wire out_uop_bp_xcpt_if; // @[util.scala:545:19]
wire [2:0] out_uop_debug_fsrc; // @[util.scala:545:19]
wire [2:0] out_uop_debug_tsrc; // @[util.scala:545:19]
wire [33:0] out_addr; // @[util.scala:545:19]
wire [63:0] out_data; // @[util.scala:545:19]
wire out_is_hella; // @[util.scala:545:19]
wire out_tag_match; // @[util.scala:545:19]
wire [1:0] out_old_meta_coh_state; // @[util.scala:545:19]
wire [21:0] out_old_meta_tag; // @[util.scala:545:19]
wire [1:0] out_way_en; // @[util.scala:545:19]
wire [4:0] out_sdq_id; // @[util.scala:545:19]
wire _io_empty_T_1; // @[util.scala:512:27]
wire [3:0] _io_count_T_5; // @[util.scala:556:22]
wire io_enq_ready_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iq_type_0_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iq_type_1_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iq_type_2_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iq_type_3_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_0_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_1_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_2_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_3_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_4_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_5_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_6_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_7_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_8_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_9_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7]
wire [31:0] io_deq_bits_uop_inst_0; // @[util.scala:458:7]
wire [31:0] io_deq_bits_uop_debug_inst_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_rvc_0; // @[util.scala:458:7]
wire [33:0] io_deq_bits_uop_debug_pc_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_issued_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7]
wire io_deq_bits_uop_dis_col_sel_0; // @[util.scala:458:7]
wire [3:0] io_deq_bits_uop_br_mask_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_br_tag_0; // @[util.scala:458:7]
wire [3:0] io_deq_bits_uop_br_type_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_sfb_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_fence_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_fencei_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_sfence_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_amo_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_eret_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_rocc_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_mov_0; // @[util.scala:458:7]
wire [3:0] io_deq_bits_uop_ftq_idx_0; // @[util.scala:458:7]
wire io_deq_bits_uop_edge_inst_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_pc_lob_0; // @[util.scala:458:7]
wire io_deq_bits_uop_taken_0; // @[util.scala:458:7]
wire io_deq_bits_uop_imm_rename_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_imm_sel_0; // @[util.scala:458:7]
wire [4:0] io_deq_bits_uop_pimm_0; // @[util.scala:458:7]
wire [19:0] io_deq_bits_uop_imm_packed_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_op1_sel_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_op2_sel_0; // @[util.scala:458:7]
wire [4:0] io_deq_bits_uop_rob_idx_0; // @[util.scala:458:7]
wire [3:0] io_deq_bits_uop_ldq_idx_0; // @[util.scala:458:7]
wire [3:0] io_deq_bits_uop_stq_idx_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_rxq_idx_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_pdst_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_prs1_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_prs2_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_prs3_0; // @[util.scala:458:7]
wire [3:0] io_deq_bits_uop_ppred_0; // @[util.scala:458:7]
wire io_deq_bits_uop_prs1_busy_0; // @[util.scala:458:7]
wire io_deq_bits_uop_prs2_busy_0; // @[util.scala:458:7]
wire io_deq_bits_uop_prs3_busy_0; // @[util.scala:458:7]
wire io_deq_bits_uop_ppred_busy_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_stale_pdst_0; // @[util.scala:458:7]
wire io_deq_bits_uop_exception_0; // @[util.scala:458:7]
wire [63:0] io_deq_bits_uop_exc_cause_0; // @[util.scala:458:7]
wire [4:0] io_deq_bits_uop_mem_cmd_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_mem_size_0; // @[util.scala:458:7]
wire io_deq_bits_uop_mem_signed_0; // @[util.scala:458:7]
wire io_deq_bits_uop_uses_ldq_0; // @[util.scala:458:7]
wire io_deq_bits_uop_uses_stq_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_unique_0; // @[util.scala:458:7]
wire io_deq_bits_uop_flush_on_commit_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_csr_cmd_0; // @[util.scala:458:7]
wire io_deq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_ldst_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_lrs1_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_lrs2_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_lrs3_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_dst_rtype_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7]
wire io_deq_bits_uop_frs3_en_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fcn_dw_0; // @[util.scala:458:7]
wire [4:0] io_deq_bits_uop_fcn_op_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_val_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_fp_rm_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_fp_typ_0; // @[util.scala:458:7]
wire io_deq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7]
wire io_deq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7]
wire io_deq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7]
wire io_deq_bits_uop_bp_debug_if_0; // @[util.scala:458:7]
wire io_deq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_debug_fsrc_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_debug_tsrc_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_old_meta_coh_state_0; // @[util.scala:458:7]
wire [21:0] io_deq_bits_old_meta_tag_0; // @[util.scala:458:7]
wire [33:0] io_deq_bits_addr_0; // @[util.scala:458:7]
wire [63:0] io_deq_bits_data_0; // @[util.scala:458:7]
wire io_deq_bits_is_hella_0; // @[util.scala:458:7]
wire io_deq_bits_tag_match_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_way_en_0; // @[util.scala:458:7]
wire [4:0] io_deq_bits_sdq_id_0; // @[util.scala:458:7]
wire io_deq_valid_0; // @[util.scala:458:7]
wire io_empty_0; // @[util.scala:458:7]
wire [3:0] io_count_0; // @[util.scala:458:7]
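// The assignments below unpack the packed row read from the entry RAM (_ram_ext_R0_data) into the
// payload fields of the dequeuing entry; the bit slices follow the field widths declared above
// (34-bit addr, 64-bit data, 1-bit is_hella, 1-bit tag_match, 2-bit coh state, 22-bit tag,
// 2-bit way_en, 5-bit sdq_id), 131 bits in total.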
assign out_addr = _ram_ext_R0_data[33:0]; // @[util.scala:503:22, :545:19]
assign out_data = _ram_ext_R0_data[97:34]; // @[util.scala:503:22, :545:19]
assign out_is_hella = _ram_ext_R0_data[98]; // @[util.scala:503:22, :545:19]
assign out_tag_match = _ram_ext_R0_data[99]; // @[util.scala:503:22, :545:19]
assign out_old_meta_coh_state = _ram_ext_R0_data[101:100]; // @[util.scala:503:22, :545:19]
assign out_old_meta_tag = _ram_ext_R0_data[123:102]; // @[util.scala:503:22, :545:19]
assign out_way_en = _ram_ext_R0_data[125:124]; // @[util.scala:503:22, :545:19]
assign out_sdq_id = _ram_ext_R0_data[130:126]; // @[util.scala:503:22, :545:19]
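// valids_0 through valids_14 appear to be the per-entry valid flags of a 15-entry queue; the
// _valids_*_T_4 wires are aliases of those flags, presumably consumed by the update and output
// logic further down.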
reg valids_0; // @[util.scala:504:26]
wire _valids_0_T_4 = valids_0; // @[util.scala:504:26, :520:31]
reg valids_1; // @[util.scala:504:26]
wire _valids_1_T_4 = valids_1; // @[util.scala:504:26, :520:31]
reg valids_2; // @[util.scala:504:26]
wire _valids_2_T_4 = valids_2; // @[util.scala:504:26, :520:31]
reg valids_3; // @[util.scala:504:26]
wire _valids_3_T_4 = valids_3; // @[util.scala:504:26, :520:31]
reg valids_4; // @[util.scala:504:26]
wire _valids_4_T_4 = valids_4; // @[util.scala:504:26, :520:31]
reg valids_5; // @[util.scala:504:26]
wire _valids_5_T_4 = valids_5; // @[util.scala:504:26, :520:31]
reg valids_6; // @[util.scala:504:26]
wire _valids_6_T_4 = valids_6; // @[util.scala:504:26, :520:31]
reg valids_7; // @[util.scala:504:26]
wire _valids_7_T_4 = valids_7; // @[util.scala:504:26, :520:31]
reg valids_8; // @[util.scala:504:26]
wire _valids_8_T_4 = valids_8; // @[util.scala:504:26, :520:31]
reg valids_9; // @[util.scala:504:26]
wire _valids_9_T_4 = valids_9; // @[util.scala:504:26, :520:31]
reg valids_10; // @[util.scala:504:26]
wire _valids_10_T_4 = valids_10; // @[util.scala:504:26, :520:31]
reg valids_11; // @[util.scala:504:26]
wire _valids_11_T_4 = valids_11; // @[util.scala:504:26, :520:31]
reg valids_12; // @[util.scala:504:26]
wire _valids_12_T_4 = valids_12; // @[util.scala:504:26, :520:31]
reg valids_13; // @[util.scala:504:26]
wire _valids_13_T_4 = valids_13; // @[util.scala:504:26, :520:31]
reg valids_14; // @[util.scala:504:26]
wire _valids_14_T_4 = valids_14; // @[util.scala:504:26, :520:31]
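// Each queue entry also carries a full micro-op record: the uops_<i>_* registers below appear to
// hold that metadata in flip-flops, one copy per entry, while the larger payload fields
// (addr, data, old_meta, ...) live in the RAM unpacked above.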
reg [31:0] uops_0_inst; // @[util.scala:505:22]
reg [31:0] uops_0_debug_inst; // @[util.scala:505:22]
reg uops_0_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_0_debug_pc; // @[util.scala:505:22]
reg uops_0_iq_type_0; // @[util.scala:505:22]
reg uops_0_iq_type_1; // @[util.scala:505:22]
reg uops_0_iq_type_2; // @[util.scala:505:22]
reg uops_0_iq_type_3; // @[util.scala:505:22]
reg uops_0_fu_code_0; // @[util.scala:505:22]
reg uops_0_fu_code_1; // @[util.scala:505:22]
reg uops_0_fu_code_2; // @[util.scala:505:22]
reg uops_0_fu_code_3; // @[util.scala:505:22]
reg uops_0_fu_code_4; // @[util.scala:505:22]
reg uops_0_fu_code_5; // @[util.scala:505:22]
reg uops_0_fu_code_6; // @[util.scala:505:22]
reg uops_0_fu_code_7; // @[util.scala:505:22]
reg uops_0_fu_code_8; // @[util.scala:505:22]
reg uops_0_fu_code_9; // @[util.scala:505:22]
reg uops_0_iw_issued; // @[util.scala:505:22]
reg uops_0_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_0_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_0_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_0_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_0_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_0_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_0_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_0_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_0_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_0_br_mask_T_1 = uops_0_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_0_br_tag; // @[util.scala:505:22]
reg [3:0] uops_0_br_type; // @[util.scala:505:22]
reg uops_0_is_sfb; // @[util.scala:505:22]
reg uops_0_is_fence; // @[util.scala:505:22]
reg uops_0_is_fencei; // @[util.scala:505:22]
reg uops_0_is_sfence; // @[util.scala:505:22]
reg uops_0_is_amo; // @[util.scala:505:22]
reg uops_0_is_eret; // @[util.scala:505:22]
reg uops_0_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_0_is_rocc; // @[util.scala:505:22]
reg uops_0_is_mov; // @[util.scala:505:22]
reg [3:0] uops_0_ftq_idx; // @[util.scala:505:22]
reg uops_0_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_0_pc_lob; // @[util.scala:505:22]
reg uops_0_taken; // @[util.scala:505:22]
reg uops_0_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_0_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_0_pimm; // @[util.scala:505:22]
reg [19:0] uops_0_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_0_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_0_op2_sel; // @[util.scala:505:22]
reg uops_0_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_0_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_0_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_0_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_0_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_0_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_0_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_0_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_0_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_0_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_0_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_0_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_0_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_0_fp_ctrl_div; // @[util.scala:505:22]
reg uops_0_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_0_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_0_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_0_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_0_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_0_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_0_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_0_pdst; // @[util.scala:505:22]
reg [5:0] uops_0_prs1; // @[util.scala:505:22]
reg [5:0] uops_0_prs2; // @[util.scala:505:22]
reg [5:0] uops_0_prs3; // @[util.scala:505:22]
reg [3:0] uops_0_ppred; // @[util.scala:505:22]
reg uops_0_prs1_busy; // @[util.scala:505:22]
reg uops_0_prs2_busy; // @[util.scala:505:22]
reg uops_0_prs3_busy; // @[util.scala:505:22]
reg uops_0_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_0_stale_pdst; // @[util.scala:505:22]
reg uops_0_exception; // @[util.scala:505:22]
reg [63:0] uops_0_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_0_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_0_mem_size; // @[util.scala:505:22]
reg uops_0_mem_signed; // @[util.scala:505:22]
reg uops_0_uses_ldq; // @[util.scala:505:22]
reg uops_0_uses_stq; // @[util.scala:505:22]
reg uops_0_is_unique; // @[util.scala:505:22]
reg uops_0_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_0_csr_cmd; // @[util.scala:505:22]
reg uops_0_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_0_ldst; // @[util.scala:505:22]
reg [5:0] uops_0_lrs1; // @[util.scala:505:22]
reg [5:0] uops_0_lrs2; // @[util.scala:505:22]
reg [5:0] uops_0_lrs3; // @[util.scala:505:22]
reg [1:0] uops_0_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_0_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_0_lrs2_rtype; // @[util.scala:505:22]
reg uops_0_frs3_en; // @[util.scala:505:22]
reg uops_0_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_0_fcn_op; // @[util.scala:505:22]
reg uops_0_fp_val; // @[util.scala:505:22]
reg [2:0] uops_0_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_0_fp_typ; // @[util.scala:505:22]
reg uops_0_xcpt_pf_if; // @[util.scala:505:22]
reg uops_0_xcpt_ae_if; // @[util.scala:505:22]
reg uops_0_xcpt_ma_if; // @[util.scala:505:22]
reg uops_0_bp_debug_if; // @[util.scala:505:22]
reg uops_0_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_0_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_0_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_1_inst; // @[util.scala:505:22]
reg [31:0] uops_1_debug_inst; // @[util.scala:505:22]
reg uops_1_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_1_debug_pc; // @[util.scala:505:22]
reg uops_1_iq_type_0; // @[util.scala:505:22]
reg uops_1_iq_type_1; // @[util.scala:505:22]
reg uops_1_iq_type_2; // @[util.scala:505:22]
reg uops_1_iq_type_3; // @[util.scala:505:22]
reg uops_1_fu_code_0; // @[util.scala:505:22]
reg uops_1_fu_code_1; // @[util.scala:505:22]
reg uops_1_fu_code_2; // @[util.scala:505:22]
reg uops_1_fu_code_3; // @[util.scala:505:22]
reg uops_1_fu_code_4; // @[util.scala:505:22]
reg uops_1_fu_code_5; // @[util.scala:505:22]
reg uops_1_fu_code_6; // @[util.scala:505:22]
reg uops_1_fu_code_7; // @[util.scala:505:22]
reg uops_1_fu_code_8; // @[util.scala:505:22]
reg uops_1_fu_code_9; // @[util.scala:505:22]
reg uops_1_iw_issued; // @[util.scala:505:22]
reg uops_1_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_1_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_1_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_1_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_1_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_1_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_1_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_1_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_1_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_1_br_mask_T_1 = uops_1_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_1_br_tag; // @[util.scala:505:22]
reg [3:0] uops_1_br_type; // @[util.scala:505:22]
reg uops_1_is_sfb; // @[util.scala:505:22]
reg uops_1_is_fence; // @[util.scala:505:22]
reg uops_1_is_fencei; // @[util.scala:505:22]
reg uops_1_is_sfence; // @[util.scala:505:22]
reg uops_1_is_amo; // @[util.scala:505:22]
reg uops_1_is_eret; // @[util.scala:505:22]
reg uops_1_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_1_is_rocc; // @[util.scala:505:22]
reg uops_1_is_mov; // @[util.scala:505:22]
reg [3:0] uops_1_ftq_idx; // @[util.scala:505:22]
reg uops_1_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_1_pc_lob; // @[util.scala:505:22]
reg uops_1_taken; // @[util.scala:505:22]
reg uops_1_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_1_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_1_pimm; // @[util.scala:505:22]
reg [19:0] uops_1_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_1_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_1_op2_sel; // @[util.scala:505:22]
reg uops_1_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_1_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_1_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_1_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_1_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_1_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_1_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_1_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_1_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_1_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_1_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_1_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_1_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_1_fp_ctrl_div; // @[util.scala:505:22]
reg uops_1_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_1_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_1_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_1_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_1_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_1_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_1_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_1_pdst; // @[util.scala:505:22]
reg [5:0] uops_1_prs1; // @[util.scala:505:22]
reg [5:0] uops_1_prs2; // @[util.scala:505:22]
reg [5:0] uops_1_prs3; // @[util.scala:505:22]
reg [3:0] uops_1_ppred; // @[util.scala:505:22]
reg uops_1_prs1_busy; // @[util.scala:505:22]
reg uops_1_prs2_busy; // @[util.scala:505:22]
reg uops_1_prs3_busy; // @[util.scala:505:22]
reg uops_1_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_1_stale_pdst; // @[util.scala:505:22]
reg uops_1_exception; // @[util.scala:505:22]
reg [63:0] uops_1_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_1_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_1_mem_size; // @[util.scala:505:22]
reg uops_1_mem_signed; // @[util.scala:505:22]
reg uops_1_uses_ldq; // @[util.scala:505:22]
reg uops_1_uses_stq; // @[util.scala:505:22]
reg uops_1_is_unique; // @[util.scala:505:22]
reg uops_1_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_1_csr_cmd; // @[util.scala:505:22]
reg uops_1_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_1_ldst; // @[util.scala:505:22]
reg [5:0] uops_1_lrs1; // @[util.scala:505:22]
reg [5:0] uops_1_lrs2; // @[util.scala:505:22]
reg [5:0] uops_1_lrs3; // @[util.scala:505:22]
reg [1:0] uops_1_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_1_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_1_lrs2_rtype; // @[util.scala:505:22]
reg uops_1_frs3_en; // @[util.scala:505:22]
reg uops_1_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_1_fcn_op; // @[util.scala:505:22]
reg uops_1_fp_val; // @[util.scala:505:22]
reg [2:0] uops_1_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_1_fp_typ; // @[util.scala:505:22]
reg uops_1_xcpt_pf_if; // @[util.scala:505:22]
reg uops_1_xcpt_ae_if; // @[util.scala:505:22]
reg uops_1_xcpt_ma_if; // @[util.scala:505:22]
reg uops_1_bp_debug_if; // @[util.scala:505:22]
reg uops_1_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_1_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_1_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_2_inst; // @[util.scala:505:22]
reg [31:0] uops_2_debug_inst; // @[util.scala:505:22]
reg uops_2_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_2_debug_pc; // @[util.scala:505:22]
reg uops_2_iq_type_0; // @[util.scala:505:22]
reg uops_2_iq_type_1; // @[util.scala:505:22]
reg uops_2_iq_type_2; // @[util.scala:505:22]
reg uops_2_iq_type_3; // @[util.scala:505:22]
reg uops_2_fu_code_0; // @[util.scala:505:22]
reg uops_2_fu_code_1; // @[util.scala:505:22]
reg uops_2_fu_code_2; // @[util.scala:505:22]
reg uops_2_fu_code_3; // @[util.scala:505:22]
reg uops_2_fu_code_4; // @[util.scala:505:22]
reg uops_2_fu_code_5; // @[util.scala:505:22]
reg uops_2_fu_code_6; // @[util.scala:505:22]
reg uops_2_fu_code_7; // @[util.scala:505:22]
reg uops_2_fu_code_8; // @[util.scala:505:22]
reg uops_2_fu_code_9; // @[util.scala:505:22]
reg uops_2_iw_issued; // @[util.scala:505:22]
reg uops_2_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_2_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_2_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_2_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_2_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_2_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_2_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_2_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_2_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_2_br_mask_T_1 = uops_2_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_2_br_tag; // @[util.scala:505:22]
reg [3:0] uops_2_br_type; // @[util.scala:505:22]
reg uops_2_is_sfb; // @[util.scala:505:22]
reg uops_2_is_fence; // @[util.scala:505:22]
reg uops_2_is_fencei; // @[util.scala:505:22]
reg uops_2_is_sfence; // @[util.scala:505:22]
reg uops_2_is_amo; // @[util.scala:505:22]
reg uops_2_is_eret; // @[util.scala:505:22]
reg uops_2_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_2_is_rocc; // @[util.scala:505:22]
reg uops_2_is_mov; // @[util.scala:505:22]
reg [3:0] uops_2_ftq_idx; // @[util.scala:505:22]
reg uops_2_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_2_pc_lob; // @[util.scala:505:22]
reg uops_2_taken; // @[util.scala:505:22]
reg uops_2_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_2_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_2_pimm; // @[util.scala:505:22]
reg [19:0] uops_2_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_2_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_2_op2_sel; // @[util.scala:505:22]
reg uops_2_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_2_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_2_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_2_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_2_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_2_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_2_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_2_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_2_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_2_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_2_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_2_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_2_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_2_fp_ctrl_div; // @[util.scala:505:22]
reg uops_2_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_2_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_2_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_2_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_2_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_2_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_2_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_2_pdst; // @[util.scala:505:22]
reg [5:0] uops_2_prs1; // @[util.scala:505:22]
reg [5:0] uops_2_prs2; // @[util.scala:505:22]
reg [5:0] uops_2_prs3; // @[util.scala:505:22]
reg [3:0] uops_2_ppred; // @[util.scala:505:22]
reg uops_2_prs1_busy; // @[util.scala:505:22]
reg uops_2_prs2_busy; // @[util.scala:505:22]
reg uops_2_prs3_busy; // @[util.scala:505:22]
reg uops_2_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_2_stale_pdst; // @[util.scala:505:22]
reg uops_2_exception; // @[util.scala:505:22]
reg [63:0] uops_2_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_2_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_2_mem_size; // @[util.scala:505:22]
reg uops_2_mem_signed; // @[util.scala:505:22]
reg uops_2_uses_ldq; // @[util.scala:505:22]
reg uops_2_uses_stq; // @[util.scala:505:22]
reg uops_2_is_unique; // @[util.scala:505:22]
reg uops_2_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_2_csr_cmd; // @[util.scala:505:22]
reg uops_2_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_2_ldst; // @[util.scala:505:22]
reg [5:0] uops_2_lrs1; // @[util.scala:505:22]
reg [5:0] uops_2_lrs2; // @[util.scala:505:22]
reg [5:0] uops_2_lrs3; // @[util.scala:505:22]
reg [1:0] uops_2_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_2_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_2_lrs2_rtype; // @[util.scala:505:22]
reg uops_2_frs3_en; // @[util.scala:505:22]
reg uops_2_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_2_fcn_op; // @[util.scala:505:22]
reg uops_2_fp_val; // @[util.scala:505:22]
reg [2:0] uops_2_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_2_fp_typ; // @[util.scala:505:22]
reg uops_2_xcpt_pf_if; // @[util.scala:505:22]
reg uops_2_xcpt_ae_if; // @[util.scala:505:22]
reg uops_2_xcpt_ma_if; // @[util.scala:505:22]
reg uops_2_bp_debug_if; // @[util.scala:505:22]
reg uops_2_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_2_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_2_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_3_inst; // @[util.scala:505:22]
reg [31:0] uops_3_debug_inst; // @[util.scala:505:22]
reg uops_3_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_3_debug_pc; // @[util.scala:505:22]
reg uops_3_iq_type_0; // @[util.scala:505:22]
reg uops_3_iq_type_1; // @[util.scala:505:22]
reg uops_3_iq_type_2; // @[util.scala:505:22]
reg uops_3_iq_type_3; // @[util.scala:505:22]
reg uops_3_fu_code_0; // @[util.scala:505:22]
reg uops_3_fu_code_1; // @[util.scala:505:22]
reg uops_3_fu_code_2; // @[util.scala:505:22]
reg uops_3_fu_code_3; // @[util.scala:505:22]
reg uops_3_fu_code_4; // @[util.scala:505:22]
reg uops_3_fu_code_5; // @[util.scala:505:22]
reg uops_3_fu_code_6; // @[util.scala:505:22]
reg uops_3_fu_code_7; // @[util.scala:505:22]
reg uops_3_fu_code_8; // @[util.scala:505:22]
reg uops_3_fu_code_9; // @[util.scala:505:22]
reg uops_3_iw_issued; // @[util.scala:505:22]
reg uops_3_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_3_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_3_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_3_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_3_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_3_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_3_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_3_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_3_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_3_br_mask_T_1 = uops_3_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_3_br_tag; // @[util.scala:505:22]
reg [3:0] uops_3_br_type; // @[util.scala:505:22]
reg uops_3_is_sfb; // @[util.scala:505:22]
reg uops_3_is_fence; // @[util.scala:505:22]
reg uops_3_is_fencei; // @[util.scala:505:22]
reg uops_3_is_sfence; // @[util.scala:505:22]
reg uops_3_is_amo; // @[util.scala:505:22]
reg uops_3_is_eret; // @[util.scala:505:22]
reg uops_3_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_3_is_rocc; // @[util.scala:505:22]
reg uops_3_is_mov; // @[util.scala:505:22]
reg [3:0] uops_3_ftq_idx; // @[util.scala:505:22]
reg uops_3_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_3_pc_lob; // @[util.scala:505:22]
reg uops_3_taken; // @[util.scala:505:22]
reg uops_3_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_3_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_3_pimm; // @[util.scala:505:22]
reg [19:0] uops_3_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_3_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_3_op2_sel; // @[util.scala:505:22]
reg uops_3_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_3_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_3_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_3_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_3_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_3_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_3_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_3_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_3_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_3_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_3_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_3_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_3_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_3_fp_ctrl_div; // @[util.scala:505:22]
reg uops_3_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_3_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_3_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_3_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_3_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_3_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_3_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_3_pdst; // @[util.scala:505:22]
reg [5:0] uops_3_prs1; // @[util.scala:505:22]
reg [5:0] uops_3_prs2; // @[util.scala:505:22]
reg [5:0] uops_3_prs3; // @[util.scala:505:22]
reg [3:0] uops_3_ppred; // @[util.scala:505:22]
reg uops_3_prs1_busy; // @[util.scala:505:22]
reg uops_3_prs2_busy; // @[util.scala:505:22]
reg uops_3_prs3_busy; // @[util.scala:505:22]
reg uops_3_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_3_stale_pdst; // @[util.scala:505:22]
reg uops_3_exception; // @[util.scala:505:22]
reg [63:0] uops_3_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_3_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_3_mem_size; // @[util.scala:505:22]
reg uops_3_mem_signed; // @[util.scala:505:22]
reg uops_3_uses_ldq; // @[util.scala:505:22]
reg uops_3_uses_stq; // @[util.scala:505:22]
reg uops_3_is_unique; // @[util.scala:505:22]
reg uops_3_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_3_csr_cmd; // @[util.scala:505:22]
reg uops_3_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_3_ldst; // @[util.scala:505:22]
reg [5:0] uops_3_lrs1; // @[util.scala:505:22]
reg [5:0] uops_3_lrs2; // @[util.scala:505:22]
reg [5:0] uops_3_lrs3; // @[util.scala:505:22]
reg [1:0] uops_3_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_3_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_3_lrs2_rtype; // @[util.scala:505:22]
reg uops_3_frs3_en; // @[util.scala:505:22]
reg uops_3_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_3_fcn_op; // @[util.scala:505:22]
reg uops_3_fp_val; // @[util.scala:505:22]
reg [2:0] uops_3_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_3_fp_typ; // @[util.scala:505:22]
reg uops_3_xcpt_pf_if; // @[util.scala:505:22]
reg uops_3_xcpt_ae_if; // @[util.scala:505:22]
reg uops_3_xcpt_ma_if; // @[util.scala:505:22]
reg uops_3_bp_debug_if; // @[util.scala:505:22]
reg uops_3_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_3_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_3_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_4_inst; // @[util.scala:505:22]
reg [31:0] uops_4_debug_inst; // @[util.scala:505:22]
reg uops_4_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_4_debug_pc; // @[util.scala:505:22]
reg uops_4_iq_type_0; // @[util.scala:505:22]
reg uops_4_iq_type_1; // @[util.scala:505:22]
reg uops_4_iq_type_2; // @[util.scala:505:22]
reg uops_4_iq_type_3; // @[util.scala:505:22]
reg uops_4_fu_code_0; // @[util.scala:505:22]
reg uops_4_fu_code_1; // @[util.scala:505:22]
reg uops_4_fu_code_2; // @[util.scala:505:22]
reg uops_4_fu_code_3; // @[util.scala:505:22]
reg uops_4_fu_code_4; // @[util.scala:505:22]
reg uops_4_fu_code_5; // @[util.scala:505:22]
reg uops_4_fu_code_6; // @[util.scala:505:22]
reg uops_4_fu_code_7; // @[util.scala:505:22]
reg uops_4_fu_code_8; // @[util.scala:505:22]
reg uops_4_fu_code_9; // @[util.scala:505:22]
reg uops_4_iw_issued; // @[util.scala:505:22]
reg uops_4_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_4_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_4_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_4_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_4_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_4_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_4_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_4_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_4_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_4_br_mask_T_1 = uops_4_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_4_br_tag; // @[util.scala:505:22]
reg [3:0] uops_4_br_type; // @[util.scala:505:22]
reg uops_4_is_sfb; // @[util.scala:505:22]
reg uops_4_is_fence; // @[util.scala:505:22]
reg uops_4_is_fencei; // @[util.scala:505:22]
reg uops_4_is_sfence; // @[util.scala:505:22]
reg uops_4_is_amo; // @[util.scala:505:22]
reg uops_4_is_eret; // @[util.scala:505:22]
reg uops_4_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_4_is_rocc; // @[util.scala:505:22]
reg uops_4_is_mov; // @[util.scala:505:22]
reg [3:0] uops_4_ftq_idx; // @[util.scala:505:22]
reg uops_4_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_4_pc_lob; // @[util.scala:505:22]
reg uops_4_taken; // @[util.scala:505:22]
reg uops_4_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_4_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_4_pimm; // @[util.scala:505:22]
reg [19:0] uops_4_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_4_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_4_op2_sel; // @[util.scala:505:22]
reg uops_4_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_4_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_4_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_4_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_4_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_4_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_4_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_4_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_4_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_4_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_4_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_4_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_4_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_4_fp_ctrl_div; // @[util.scala:505:22]
reg uops_4_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_4_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_4_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_4_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_4_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_4_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_4_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_4_pdst; // @[util.scala:505:22]
reg [5:0] uops_4_prs1; // @[util.scala:505:22]
reg [5:0] uops_4_prs2; // @[util.scala:505:22]
reg [5:0] uops_4_prs3; // @[util.scala:505:22]
reg [3:0] uops_4_ppred; // @[util.scala:505:22]
reg uops_4_prs1_busy; // @[util.scala:505:22]
reg uops_4_prs2_busy; // @[util.scala:505:22]
reg uops_4_prs3_busy; // @[util.scala:505:22]
reg uops_4_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_4_stale_pdst; // @[util.scala:505:22]
reg uops_4_exception; // @[util.scala:505:22]
reg [63:0] uops_4_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_4_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_4_mem_size; // @[util.scala:505:22]
reg uops_4_mem_signed; // @[util.scala:505:22]
reg uops_4_uses_ldq; // @[util.scala:505:22]
reg uops_4_uses_stq; // @[util.scala:505:22]
reg uops_4_is_unique; // @[util.scala:505:22]
reg uops_4_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_4_csr_cmd; // @[util.scala:505:22]
reg uops_4_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_4_ldst; // @[util.scala:505:22]
reg [5:0] uops_4_lrs1; // @[util.scala:505:22]
reg [5:0] uops_4_lrs2; // @[util.scala:505:22]
reg [5:0] uops_4_lrs3; // @[util.scala:505:22]
reg [1:0] uops_4_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_4_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_4_lrs2_rtype; // @[util.scala:505:22]
reg uops_4_frs3_en; // @[util.scala:505:22]
reg uops_4_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_4_fcn_op; // @[util.scala:505:22]
reg uops_4_fp_val; // @[util.scala:505:22]
reg [2:0] uops_4_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_4_fp_typ; // @[util.scala:505:22]
reg uops_4_xcpt_pf_if; // @[util.scala:505:22]
reg uops_4_xcpt_ae_if; // @[util.scala:505:22]
reg uops_4_xcpt_ma_if; // @[util.scala:505:22]
reg uops_4_bp_debug_if; // @[util.scala:505:22]
reg uops_4_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_4_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_4_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_5_inst; // @[util.scala:505:22]
reg [31:0] uops_5_debug_inst; // @[util.scala:505:22]
reg uops_5_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_5_debug_pc; // @[util.scala:505:22]
reg uops_5_iq_type_0; // @[util.scala:505:22]
reg uops_5_iq_type_1; // @[util.scala:505:22]
reg uops_5_iq_type_2; // @[util.scala:505:22]
reg uops_5_iq_type_3; // @[util.scala:505:22]
reg uops_5_fu_code_0; // @[util.scala:505:22]
reg uops_5_fu_code_1; // @[util.scala:505:22]
reg uops_5_fu_code_2; // @[util.scala:505:22]
reg uops_5_fu_code_3; // @[util.scala:505:22]
reg uops_5_fu_code_4; // @[util.scala:505:22]
reg uops_5_fu_code_5; // @[util.scala:505:22]
reg uops_5_fu_code_6; // @[util.scala:505:22]
reg uops_5_fu_code_7; // @[util.scala:505:22]
reg uops_5_fu_code_8; // @[util.scala:505:22]
reg uops_5_fu_code_9; // @[util.scala:505:22]
reg uops_5_iw_issued; // @[util.scala:505:22]
reg uops_5_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_5_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_5_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_5_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_5_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_5_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_5_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_5_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_5_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_5_br_mask_T_1 = uops_5_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_5_br_tag; // @[util.scala:505:22]
reg [3:0] uops_5_br_type; // @[util.scala:505:22]
reg uops_5_is_sfb; // @[util.scala:505:22]
reg uops_5_is_fence; // @[util.scala:505:22]
reg uops_5_is_fencei; // @[util.scala:505:22]
reg uops_5_is_sfence; // @[util.scala:505:22]
reg uops_5_is_amo; // @[util.scala:505:22]
reg uops_5_is_eret; // @[util.scala:505:22]
reg uops_5_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_5_is_rocc; // @[util.scala:505:22]
reg uops_5_is_mov; // @[util.scala:505:22]
reg [3:0] uops_5_ftq_idx; // @[util.scala:505:22]
reg uops_5_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_5_pc_lob; // @[util.scala:505:22]
reg uops_5_taken; // @[util.scala:505:22]
reg uops_5_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_5_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_5_pimm; // @[util.scala:505:22]
reg [19:0] uops_5_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_5_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_5_op2_sel; // @[util.scala:505:22]
reg uops_5_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_5_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_5_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_5_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_5_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_5_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_5_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_5_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_5_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_5_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_5_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_5_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_5_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_5_fp_ctrl_div; // @[util.scala:505:22]
reg uops_5_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_5_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_5_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_5_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_5_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_5_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_5_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_5_pdst; // @[util.scala:505:22]
reg [5:0] uops_5_prs1; // @[util.scala:505:22]
reg [5:0] uops_5_prs2; // @[util.scala:505:22]
reg [5:0] uops_5_prs3; // @[util.scala:505:22]
reg [3:0] uops_5_ppred; // @[util.scala:505:22]
reg uops_5_prs1_busy; // @[util.scala:505:22]
reg uops_5_prs2_busy; // @[util.scala:505:22]
reg uops_5_prs3_busy; // @[util.scala:505:22]
reg uops_5_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_5_stale_pdst; // @[util.scala:505:22]
reg uops_5_exception; // @[util.scala:505:22]
reg [63:0] uops_5_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_5_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_5_mem_size; // @[util.scala:505:22]
reg uops_5_mem_signed; // @[util.scala:505:22]
reg uops_5_uses_ldq; // @[util.scala:505:22]
reg uops_5_uses_stq; // @[util.scala:505:22]
reg uops_5_is_unique; // @[util.scala:505:22]
reg uops_5_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_5_csr_cmd; // @[util.scala:505:22]
reg uops_5_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_5_ldst; // @[util.scala:505:22]
reg [5:0] uops_5_lrs1; // @[util.scala:505:22]
reg [5:0] uops_5_lrs2; // @[util.scala:505:22]
reg [5:0] uops_5_lrs3; // @[util.scala:505:22]
reg [1:0] uops_5_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_5_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_5_lrs2_rtype; // @[util.scala:505:22]
reg uops_5_frs3_en; // @[util.scala:505:22]
reg uops_5_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_5_fcn_op; // @[util.scala:505:22]
reg uops_5_fp_val; // @[util.scala:505:22]
reg [2:0] uops_5_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_5_fp_typ; // @[util.scala:505:22]
reg uops_5_xcpt_pf_if; // @[util.scala:505:22]
reg uops_5_xcpt_ae_if; // @[util.scala:505:22]
reg uops_5_xcpt_ma_if; // @[util.scala:505:22]
reg uops_5_bp_debug_if; // @[util.scala:505:22]
reg uops_5_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_5_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_5_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_6_inst; // @[util.scala:505:22]
reg [31:0] uops_6_debug_inst; // @[util.scala:505:22]
reg uops_6_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_6_debug_pc; // @[util.scala:505:22]
reg uops_6_iq_type_0; // @[util.scala:505:22]
reg uops_6_iq_type_1; // @[util.scala:505:22]
reg uops_6_iq_type_2; // @[util.scala:505:22]
reg uops_6_iq_type_3; // @[util.scala:505:22]
reg uops_6_fu_code_0; // @[util.scala:505:22]
reg uops_6_fu_code_1; // @[util.scala:505:22]
reg uops_6_fu_code_2; // @[util.scala:505:22]
reg uops_6_fu_code_3; // @[util.scala:505:22]
reg uops_6_fu_code_4; // @[util.scala:505:22]
reg uops_6_fu_code_5; // @[util.scala:505:22]
reg uops_6_fu_code_6; // @[util.scala:505:22]
reg uops_6_fu_code_7; // @[util.scala:505:22]
reg uops_6_fu_code_8; // @[util.scala:505:22]
reg uops_6_fu_code_9; // @[util.scala:505:22]
reg uops_6_iw_issued; // @[util.scala:505:22]
reg uops_6_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_6_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_6_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_6_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_6_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_6_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_6_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_6_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_6_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_6_br_mask_T_1 = uops_6_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_6_br_tag; // @[util.scala:505:22]
reg [3:0] uops_6_br_type; // @[util.scala:505:22]
reg uops_6_is_sfb; // @[util.scala:505:22]
reg uops_6_is_fence; // @[util.scala:505:22]
reg uops_6_is_fencei; // @[util.scala:505:22]
reg uops_6_is_sfence; // @[util.scala:505:22]
reg uops_6_is_amo; // @[util.scala:505:22]
reg uops_6_is_eret; // @[util.scala:505:22]
reg uops_6_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_6_is_rocc; // @[util.scala:505:22]
reg uops_6_is_mov; // @[util.scala:505:22]
reg [3:0] uops_6_ftq_idx; // @[util.scala:505:22]
reg uops_6_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_6_pc_lob; // @[util.scala:505:22]
reg uops_6_taken; // @[util.scala:505:22]
reg uops_6_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_6_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_6_pimm; // @[util.scala:505:22]
reg [19:0] uops_6_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_6_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_6_op2_sel; // @[util.scala:505:22]
reg uops_6_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_6_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_6_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_6_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_6_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_6_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_6_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_6_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_6_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_6_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_6_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_6_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_6_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_6_fp_ctrl_div; // @[util.scala:505:22]
reg uops_6_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_6_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_6_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_6_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_6_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_6_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_6_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_6_pdst; // @[util.scala:505:22]
reg [5:0] uops_6_prs1; // @[util.scala:505:22]
reg [5:0] uops_6_prs2; // @[util.scala:505:22]
reg [5:0] uops_6_prs3; // @[util.scala:505:22]
reg [3:0] uops_6_ppred; // @[util.scala:505:22]
reg uops_6_prs1_busy; // @[util.scala:505:22]
reg uops_6_prs2_busy; // @[util.scala:505:22]
reg uops_6_prs3_busy; // @[util.scala:505:22]
reg uops_6_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_6_stale_pdst; // @[util.scala:505:22]
reg uops_6_exception; // @[util.scala:505:22]
reg [63:0] uops_6_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_6_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_6_mem_size; // @[util.scala:505:22]
reg uops_6_mem_signed; // @[util.scala:505:22]
reg uops_6_uses_ldq; // @[util.scala:505:22]
reg uops_6_uses_stq; // @[util.scala:505:22]
reg uops_6_is_unique; // @[util.scala:505:22]
reg uops_6_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_6_csr_cmd; // @[util.scala:505:22]
reg uops_6_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_6_ldst; // @[util.scala:505:22]
reg [5:0] uops_6_lrs1; // @[util.scala:505:22]
reg [5:0] uops_6_lrs2; // @[util.scala:505:22]
reg [5:0] uops_6_lrs3; // @[util.scala:505:22]
reg [1:0] uops_6_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_6_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_6_lrs2_rtype; // @[util.scala:505:22]
reg uops_6_frs3_en; // @[util.scala:505:22]
reg uops_6_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_6_fcn_op; // @[util.scala:505:22]
reg uops_6_fp_val; // @[util.scala:505:22]
reg [2:0] uops_6_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_6_fp_typ; // @[util.scala:505:22]
reg uops_6_xcpt_pf_if; // @[util.scala:505:22]
reg uops_6_xcpt_ae_if; // @[util.scala:505:22]
reg uops_6_xcpt_ma_if; // @[util.scala:505:22]
reg uops_6_bp_debug_if; // @[util.scala:505:22]
reg uops_6_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_6_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_6_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_7_inst; // @[util.scala:505:22]
reg [31:0] uops_7_debug_inst; // @[util.scala:505:22]
reg uops_7_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_7_debug_pc; // @[util.scala:505:22]
reg uops_7_iq_type_0; // @[util.scala:505:22]
reg uops_7_iq_type_1; // @[util.scala:505:22]
reg uops_7_iq_type_2; // @[util.scala:505:22]
reg uops_7_iq_type_3; // @[util.scala:505:22]
reg uops_7_fu_code_0; // @[util.scala:505:22]
reg uops_7_fu_code_1; // @[util.scala:505:22]
reg uops_7_fu_code_2; // @[util.scala:505:22]
reg uops_7_fu_code_3; // @[util.scala:505:22]
reg uops_7_fu_code_4; // @[util.scala:505:22]
reg uops_7_fu_code_5; // @[util.scala:505:22]
reg uops_7_fu_code_6; // @[util.scala:505:22]
reg uops_7_fu_code_7; // @[util.scala:505:22]
reg uops_7_fu_code_8; // @[util.scala:505:22]
reg uops_7_fu_code_9; // @[util.scala:505:22]
reg uops_7_iw_issued; // @[util.scala:505:22]
reg uops_7_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_7_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_7_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_7_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_7_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_7_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_7_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_7_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_7_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_7_br_mask_T_1 = uops_7_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_7_br_tag; // @[util.scala:505:22]
reg [3:0] uops_7_br_type; // @[util.scala:505:22]
reg uops_7_is_sfb; // @[util.scala:505:22]
reg uops_7_is_fence; // @[util.scala:505:22]
reg uops_7_is_fencei; // @[util.scala:505:22]
reg uops_7_is_sfence; // @[util.scala:505:22]
reg uops_7_is_amo; // @[util.scala:505:22]
reg uops_7_is_eret; // @[util.scala:505:22]
reg uops_7_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_7_is_rocc; // @[util.scala:505:22]
reg uops_7_is_mov; // @[util.scala:505:22]
reg [3:0] uops_7_ftq_idx; // @[util.scala:505:22]
reg uops_7_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_7_pc_lob; // @[util.scala:505:22]
reg uops_7_taken; // @[util.scala:505:22]
reg uops_7_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_7_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_7_pimm; // @[util.scala:505:22]
reg [19:0] uops_7_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_7_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_7_op2_sel; // @[util.scala:505:22]
reg uops_7_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_7_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_7_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_7_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_7_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_7_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_7_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_7_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_7_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_7_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_7_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_7_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_7_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_7_fp_ctrl_div; // @[util.scala:505:22]
reg uops_7_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_7_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_7_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_7_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_7_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_7_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_7_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_7_pdst; // @[util.scala:505:22]
reg [5:0] uops_7_prs1; // @[util.scala:505:22]
reg [5:0] uops_7_prs2; // @[util.scala:505:22]
reg [5:0] uops_7_prs3; // @[util.scala:505:22]
reg [3:0] uops_7_ppred; // @[util.scala:505:22]
reg uops_7_prs1_busy; // @[util.scala:505:22]
reg uops_7_prs2_busy; // @[util.scala:505:22]
reg uops_7_prs3_busy; // @[util.scala:505:22]
reg uops_7_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_7_stale_pdst; // @[util.scala:505:22]
reg uops_7_exception; // @[util.scala:505:22]
reg [63:0] uops_7_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_7_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_7_mem_size; // @[util.scala:505:22]
reg uops_7_mem_signed; // @[util.scala:505:22]
reg uops_7_uses_ldq; // @[util.scala:505:22]
reg uops_7_uses_stq; // @[util.scala:505:22]
reg uops_7_is_unique; // @[util.scala:505:22]
reg uops_7_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_7_csr_cmd; // @[util.scala:505:22]
reg uops_7_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_7_ldst; // @[util.scala:505:22]
reg [5:0] uops_7_lrs1; // @[util.scala:505:22]
reg [5:0] uops_7_lrs2; // @[util.scala:505:22]
reg [5:0] uops_7_lrs3; // @[util.scala:505:22]
reg [1:0] uops_7_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_7_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_7_lrs2_rtype; // @[util.scala:505:22]
reg uops_7_frs3_en; // @[util.scala:505:22]
reg uops_7_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_7_fcn_op; // @[util.scala:505:22]
reg uops_7_fp_val; // @[util.scala:505:22]
reg [2:0] uops_7_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_7_fp_typ; // @[util.scala:505:22]
reg uops_7_xcpt_pf_if; // @[util.scala:505:22]
reg uops_7_xcpt_ae_if; // @[util.scala:505:22]
reg uops_7_xcpt_ma_if; // @[util.scala:505:22]
reg uops_7_bp_debug_if; // @[util.scala:505:22]
reg uops_7_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_7_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_7_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_8_inst; // @[util.scala:505:22]
reg [31:0] uops_8_debug_inst; // @[util.scala:505:22]
reg uops_8_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_8_debug_pc; // @[util.scala:505:22]
reg uops_8_iq_type_0; // @[util.scala:505:22]
reg uops_8_iq_type_1; // @[util.scala:505:22]
reg uops_8_iq_type_2; // @[util.scala:505:22]
reg uops_8_iq_type_3; // @[util.scala:505:22]
reg uops_8_fu_code_0; // @[util.scala:505:22]
reg uops_8_fu_code_1; // @[util.scala:505:22]
reg uops_8_fu_code_2; // @[util.scala:505:22]
reg uops_8_fu_code_3; // @[util.scala:505:22]
reg uops_8_fu_code_4; // @[util.scala:505:22]
reg uops_8_fu_code_5; // @[util.scala:505:22]
reg uops_8_fu_code_6; // @[util.scala:505:22]
reg uops_8_fu_code_7; // @[util.scala:505:22]
reg uops_8_fu_code_8; // @[util.scala:505:22]
reg uops_8_fu_code_9; // @[util.scala:505:22]
reg uops_8_iw_issued; // @[util.scala:505:22]
reg uops_8_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_8_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_8_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_8_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_8_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_8_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_8_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_8_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_8_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_8_br_mask_T_1 = uops_8_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_8_br_tag; // @[util.scala:505:22]
reg [3:0] uops_8_br_type; // @[util.scala:505:22]
reg uops_8_is_sfb; // @[util.scala:505:22]
reg uops_8_is_fence; // @[util.scala:505:22]
reg uops_8_is_fencei; // @[util.scala:505:22]
reg uops_8_is_sfence; // @[util.scala:505:22]
reg uops_8_is_amo; // @[util.scala:505:22]
reg uops_8_is_eret; // @[util.scala:505:22]
reg uops_8_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_8_is_rocc; // @[util.scala:505:22]
reg uops_8_is_mov; // @[util.scala:505:22]
reg [3:0] uops_8_ftq_idx; // @[util.scala:505:22]
reg uops_8_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_8_pc_lob; // @[util.scala:505:22]
reg uops_8_taken; // @[util.scala:505:22]
reg uops_8_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_8_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_8_pimm; // @[util.scala:505:22]
reg [19:0] uops_8_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_8_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_8_op2_sel; // @[util.scala:505:22]
reg uops_8_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_8_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_8_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_8_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_8_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_8_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_8_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_8_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_8_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_8_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_8_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_8_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_8_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_8_fp_ctrl_div; // @[util.scala:505:22]
reg uops_8_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_8_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_8_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_8_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_8_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_8_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_8_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_8_pdst; // @[util.scala:505:22]
reg [5:0] uops_8_prs1; // @[util.scala:505:22]
reg [5:0] uops_8_prs2; // @[util.scala:505:22]
reg [5:0] uops_8_prs3; // @[util.scala:505:22]
reg [3:0] uops_8_ppred; // @[util.scala:505:22]
reg uops_8_prs1_busy; // @[util.scala:505:22]
reg uops_8_prs2_busy; // @[util.scala:505:22]
reg uops_8_prs3_busy; // @[util.scala:505:22]
reg uops_8_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_8_stale_pdst; // @[util.scala:505:22]
reg uops_8_exception; // @[util.scala:505:22]
reg [63:0] uops_8_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_8_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_8_mem_size; // @[util.scala:505:22]
reg uops_8_mem_signed; // @[util.scala:505:22]
reg uops_8_uses_ldq; // @[util.scala:505:22]
reg uops_8_uses_stq; // @[util.scala:505:22]
reg uops_8_is_unique; // @[util.scala:505:22]
reg uops_8_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_8_csr_cmd; // @[util.scala:505:22]
reg uops_8_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_8_ldst; // @[util.scala:505:22]
reg [5:0] uops_8_lrs1; // @[util.scala:505:22]
reg [5:0] uops_8_lrs2; // @[util.scala:505:22]
reg [5:0] uops_8_lrs3; // @[util.scala:505:22]
reg [1:0] uops_8_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_8_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_8_lrs2_rtype; // @[util.scala:505:22]
reg uops_8_frs3_en; // @[util.scala:505:22]
reg uops_8_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_8_fcn_op; // @[util.scala:505:22]
reg uops_8_fp_val; // @[util.scala:505:22]
reg [2:0] uops_8_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_8_fp_typ; // @[util.scala:505:22]
reg uops_8_xcpt_pf_if; // @[util.scala:505:22]
reg uops_8_xcpt_ae_if; // @[util.scala:505:22]
reg uops_8_xcpt_ma_if; // @[util.scala:505:22]
reg uops_8_bp_debug_if; // @[util.scala:505:22]
reg uops_8_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_8_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_8_debug_tsrc; // @[util.scala:505:22]
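// Queue entry 9: micro-op payload registers (same field layout as entry 8; this pattern repeats for every entry of the queue).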
reg [31:0] uops_9_inst; // @[util.scala:505:22]
reg [31:0] uops_9_debug_inst; // @[util.scala:505:22]
reg uops_9_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_9_debug_pc; // @[util.scala:505:22]
reg uops_9_iq_type_0; // @[util.scala:505:22]
reg uops_9_iq_type_1; // @[util.scala:505:22]
reg uops_9_iq_type_2; // @[util.scala:505:22]
reg uops_9_iq_type_3; // @[util.scala:505:22]
reg uops_9_fu_code_0; // @[util.scala:505:22]
reg uops_9_fu_code_1; // @[util.scala:505:22]
reg uops_9_fu_code_2; // @[util.scala:505:22]
reg uops_9_fu_code_3; // @[util.scala:505:22]
reg uops_9_fu_code_4; // @[util.scala:505:22]
reg uops_9_fu_code_5; // @[util.scala:505:22]
reg uops_9_fu_code_6; // @[util.scala:505:22]
reg uops_9_fu_code_7; // @[util.scala:505:22]
reg uops_9_fu_code_8; // @[util.scala:505:22]
reg uops_9_fu_code_9; // @[util.scala:505:22]
reg uops_9_iw_issued; // @[util.scala:505:22]
reg uops_9_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_9_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_9_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_9_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_9_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_9_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_9_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_9_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_9_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_9_br_mask_T_1 = uops_9_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_9_br_tag; // @[util.scala:505:22]
reg [3:0] uops_9_br_type; // @[util.scala:505:22]
reg uops_9_is_sfb; // @[util.scala:505:22]
reg uops_9_is_fence; // @[util.scala:505:22]
reg uops_9_is_fencei; // @[util.scala:505:22]
reg uops_9_is_sfence; // @[util.scala:505:22]
reg uops_9_is_amo; // @[util.scala:505:22]
reg uops_9_is_eret; // @[util.scala:505:22]
reg uops_9_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_9_is_rocc; // @[util.scala:505:22]
reg uops_9_is_mov; // @[util.scala:505:22]
reg [3:0] uops_9_ftq_idx; // @[util.scala:505:22]
reg uops_9_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_9_pc_lob; // @[util.scala:505:22]
reg uops_9_taken; // @[util.scala:505:22]
reg uops_9_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_9_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_9_pimm; // @[util.scala:505:22]
reg [19:0] uops_9_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_9_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_9_op2_sel; // @[util.scala:505:22]
reg uops_9_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_9_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_9_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_9_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_9_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_9_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_9_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_9_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_9_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_9_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_9_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_9_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_9_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_9_fp_ctrl_div; // @[util.scala:505:22]
reg uops_9_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_9_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_9_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_9_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_9_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_9_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_9_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_9_pdst; // @[util.scala:505:22]
reg [5:0] uops_9_prs1; // @[util.scala:505:22]
reg [5:0] uops_9_prs2; // @[util.scala:505:22]
reg [5:0] uops_9_prs3; // @[util.scala:505:22]
reg [3:0] uops_9_ppred; // @[util.scala:505:22]
reg uops_9_prs1_busy; // @[util.scala:505:22]
reg uops_9_prs2_busy; // @[util.scala:505:22]
reg uops_9_prs3_busy; // @[util.scala:505:22]
reg uops_9_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_9_stale_pdst; // @[util.scala:505:22]
reg uops_9_exception; // @[util.scala:505:22]
reg [63:0] uops_9_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_9_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_9_mem_size; // @[util.scala:505:22]
reg uops_9_mem_signed; // @[util.scala:505:22]
reg uops_9_uses_ldq; // @[util.scala:505:22]
reg uops_9_uses_stq; // @[util.scala:505:22]
reg uops_9_is_unique; // @[util.scala:505:22]
reg uops_9_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_9_csr_cmd; // @[util.scala:505:22]
reg uops_9_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_9_ldst; // @[util.scala:505:22]
reg [5:0] uops_9_lrs1; // @[util.scala:505:22]
reg [5:0] uops_9_lrs2; // @[util.scala:505:22]
reg [5:0] uops_9_lrs3; // @[util.scala:505:22]
reg [1:0] uops_9_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_9_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_9_lrs2_rtype; // @[util.scala:505:22]
reg uops_9_frs3_en; // @[util.scala:505:22]
reg uops_9_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_9_fcn_op; // @[util.scala:505:22]
reg uops_9_fp_val; // @[util.scala:505:22]
reg [2:0] uops_9_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_9_fp_typ; // @[util.scala:505:22]
reg uops_9_xcpt_pf_if; // @[util.scala:505:22]
reg uops_9_xcpt_ae_if; // @[util.scala:505:22]
reg uops_9_xcpt_ma_if; // @[util.scala:505:22]
reg uops_9_bp_debug_if; // @[util.scala:505:22]
reg uops_9_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_9_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_9_debug_tsrc; // @[util.scala:505:22]
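// Queue entry 10: micro-op payload registers.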
reg [31:0] uops_10_inst; // @[util.scala:505:22]
reg [31:0] uops_10_debug_inst; // @[util.scala:505:22]
reg uops_10_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_10_debug_pc; // @[util.scala:505:22]
reg uops_10_iq_type_0; // @[util.scala:505:22]
reg uops_10_iq_type_1; // @[util.scala:505:22]
reg uops_10_iq_type_2; // @[util.scala:505:22]
reg uops_10_iq_type_3; // @[util.scala:505:22]
reg uops_10_fu_code_0; // @[util.scala:505:22]
reg uops_10_fu_code_1; // @[util.scala:505:22]
reg uops_10_fu_code_2; // @[util.scala:505:22]
reg uops_10_fu_code_3; // @[util.scala:505:22]
reg uops_10_fu_code_4; // @[util.scala:505:22]
reg uops_10_fu_code_5; // @[util.scala:505:22]
reg uops_10_fu_code_6; // @[util.scala:505:22]
reg uops_10_fu_code_7; // @[util.scala:505:22]
reg uops_10_fu_code_8; // @[util.scala:505:22]
reg uops_10_fu_code_9; // @[util.scala:505:22]
reg uops_10_iw_issued; // @[util.scala:505:22]
reg uops_10_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_10_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_10_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_10_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_10_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_10_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_10_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_10_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_10_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_10_br_mask_T_1 = uops_10_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_10_br_tag; // @[util.scala:505:22]
reg [3:0] uops_10_br_type; // @[util.scala:505:22]
reg uops_10_is_sfb; // @[util.scala:505:22]
reg uops_10_is_fence; // @[util.scala:505:22]
reg uops_10_is_fencei; // @[util.scala:505:22]
reg uops_10_is_sfence; // @[util.scala:505:22]
reg uops_10_is_amo; // @[util.scala:505:22]
reg uops_10_is_eret; // @[util.scala:505:22]
reg uops_10_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_10_is_rocc; // @[util.scala:505:22]
reg uops_10_is_mov; // @[util.scala:505:22]
reg [3:0] uops_10_ftq_idx; // @[util.scala:505:22]
reg uops_10_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_10_pc_lob; // @[util.scala:505:22]
reg uops_10_taken; // @[util.scala:505:22]
reg uops_10_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_10_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_10_pimm; // @[util.scala:505:22]
reg [19:0] uops_10_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_10_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_10_op2_sel; // @[util.scala:505:22]
reg uops_10_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_10_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_10_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_10_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_10_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_10_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_10_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_10_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_10_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_10_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_10_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_10_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_10_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_10_fp_ctrl_div; // @[util.scala:505:22]
reg uops_10_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_10_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_10_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_10_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_10_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_10_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_10_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_10_pdst; // @[util.scala:505:22]
reg [5:0] uops_10_prs1; // @[util.scala:505:22]
reg [5:0] uops_10_prs2; // @[util.scala:505:22]
reg [5:0] uops_10_prs3; // @[util.scala:505:22]
reg [3:0] uops_10_ppred; // @[util.scala:505:22]
reg uops_10_prs1_busy; // @[util.scala:505:22]
reg uops_10_prs2_busy; // @[util.scala:505:22]
reg uops_10_prs3_busy; // @[util.scala:505:22]
reg uops_10_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_10_stale_pdst; // @[util.scala:505:22]
reg uops_10_exception; // @[util.scala:505:22]
reg [63:0] uops_10_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_10_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_10_mem_size; // @[util.scala:505:22]
reg uops_10_mem_signed; // @[util.scala:505:22]
reg uops_10_uses_ldq; // @[util.scala:505:22]
reg uops_10_uses_stq; // @[util.scala:505:22]
reg uops_10_is_unique; // @[util.scala:505:22]
reg uops_10_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_10_csr_cmd; // @[util.scala:505:22]
reg uops_10_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_10_ldst; // @[util.scala:505:22]
reg [5:0] uops_10_lrs1; // @[util.scala:505:22]
reg [5:0] uops_10_lrs2; // @[util.scala:505:22]
reg [5:0] uops_10_lrs3; // @[util.scala:505:22]
reg [1:0] uops_10_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_10_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_10_lrs2_rtype; // @[util.scala:505:22]
reg uops_10_frs3_en; // @[util.scala:505:22]
reg uops_10_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_10_fcn_op; // @[util.scala:505:22]
reg uops_10_fp_val; // @[util.scala:505:22]
reg [2:0] uops_10_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_10_fp_typ; // @[util.scala:505:22]
reg uops_10_xcpt_pf_if; // @[util.scala:505:22]
reg uops_10_xcpt_ae_if; // @[util.scala:505:22]
reg uops_10_xcpt_ma_if; // @[util.scala:505:22]
reg uops_10_bp_debug_if; // @[util.scala:505:22]
reg uops_10_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_10_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_10_debug_tsrc; // @[util.scala:505:22]
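// Queue entry 11: micro-op payload registers.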
reg [31:0] uops_11_inst; // @[util.scala:505:22]
reg [31:0] uops_11_debug_inst; // @[util.scala:505:22]
reg uops_11_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_11_debug_pc; // @[util.scala:505:22]
reg uops_11_iq_type_0; // @[util.scala:505:22]
reg uops_11_iq_type_1; // @[util.scala:505:22]
reg uops_11_iq_type_2; // @[util.scala:505:22]
reg uops_11_iq_type_3; // @[util.scala:505:22]
reg uops_11_fu_code_0; // @[util.scala:505:22]
reg uops_11_fu_code_1; // @[util.scala:505:22]
reg uops_11_fu_code_2; // @[util.scala:505:22]
reg uops_11_fu_code_3; // @[util.scala:505:22]
reg uops_11_fu_code_4; // @[util.scala:505:22]
reg uops_11_fu_code_5; // @[util.scala:505:22]
reg uops_11_fu_code_6; // @[util.scala:505:22]
reg uops_11_fu_code_7; // @[util.scala:505:22]
reg uops_11_fu_code_8; // @[util.scala:505:22]
reg uops_11_fu_code_9; // @[util.scala:505:22]
reg uops_11_iw_issued; // @[util.scala:505:22]
reg uops_11_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_11_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_11_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_11_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_11_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_11_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_11_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_11_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_11_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_11_br_mask_T_1 = uops_11_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_11_br_tag; // @[util.scala:505:22]
reg [3:0] uops_11_br_type; // @[util.scala:505:22]
reg uops_11_is_sfb; // @[util.scala:505:22]
reg uops_11_is_fence; // @[util.scala:505:22]
reg uops_11_is_fencei; // @[util.scala:505:22]
reg uops_11_is_sfence; // @[util.scala:505:22]
reg uops_11_is_amo; // @[util.scala:505:22]
reg uops_11_is_eret; // @[util.scala:505:22]
reg uops_11_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_11_is_rocc; // @[util.scala:505:22]
reg uops_11_is_mov; // @[util.scala:505:22]
reg [3:0] uops_11_ftq_idx; // @[util.scala:505:22]
reg uops_11_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_11_pc_lob; // @[util.scala:505:22]
reg uops_11_taken; // @[util.scala:505:22]
reg uops_11_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_11_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_11_pimm; // @[util.scala:505:22]
reg [19:0] uops_11_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_11_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_11_op2_sel; // @[util.scala:505:22]
reg uops_11_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_11_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_11_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_11_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_11_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_11_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_11_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_11_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_11_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_11_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_11_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_11_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_11_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_11_fp_ctrl_div; // @[util.scala:505:22]
reg uops_11_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_11_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_11_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_11_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_11_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_11_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_11_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_11_pdst; // @[util.scala:505:22]
reg [5:0] uops_11_prs1; // @[util.scala:505:22]
reg [5:0] uops_11_prs2; // @[util.scala:505:22]
reg [5:0] uops_11_prs3; // @[util.scala:505:22]
reg [3:0] uops_11_ppred; // @[util.scala:505:22]
reg uops_11_prs1_busy; // @[util.scala:505:22]
reg uops_11_prs2_busy; // @[util.scala:505:22]
reg uops_11_prs3_busy; // @[util.scala:505:22]
reg uops_11_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_11_stale_pdst; // @[util.scala:505:22]
reg uops_11_exception; // @[util.scala:505:22]
reg [63:0] uops_11_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_11_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_11_mem_size; // @[util.scala:505:22]
reg uops_11_mem_signed; // @[util.scala:505:22]
reg uops_11_uses_ldq; // @[util.scala:505:22]
reg uops_11_uses_stq; // @[util.scala:505:22]
reg uops_11_is_unique; // @[util.scala:505:22]
reg uops_11_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_11_csr_cmd; // @[util.scala:505:22]
reg uops_11_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_11_ldst; // @[util.scala:505:22]
reg [5:0] uops_11_lrs1; // @[util.scala:505:22]
reg [5:0] uops_11_lrs2; // @[util.scala:505:22]
reg [5:0] uops_11_lrs3; // @[util.scala:505:22]
reg [1:0] uops_11_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_11_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_11_lrs2_rtype; // @[util.scala:505:22]
reg uops_11_frs3_en; // @[util.scala:505:22]
reg uops_11_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_11_fcn_op; // @[util.scala:505:22]
reg uops_11_fp_val; // @[util.scala:505:22]
reg [2:0] uops_11_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_11_fp_typ; // @[util.scala:505:22]
reg uops_11_xcpt_pf_if; // @[util.scala:505:22]
reg uops_11_xcpt_ae_if; // @[util.scala:505:22]
reg uops_11_xcpt_ma_if; // @[util.scala:505:22]
reg uops_11_bp_debug_if; // @[util.scala:505:22]
reg uops_11_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_11_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_11_debug_tsrc; // @[util.scala:505:22]
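// Queue entry 12: micro-op payload registers.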
reg [31:0] uops_12_inst; // @[util.scala:505:22]
reg [31:0] uops_12_debug_inst; // @[util.scala:505:22]
reg uops_12_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_12_debug_pc; // @[util.scala:505:22]
reg uops_12_iq_type_0; // @[util.scala:505:22]
reg uops_12_iq_type_1; // @[util.scala:505:22]
reg uops_12_iq_type_2; // @[util.scala:505:22]
reg uops_12_iq_type_3; // @[util.scala:505:22]
reg uops_12_fu_code_0; // @[util.scala:505:22]
reg uops_12_fu_code_1; // @[util.scala:505:22]
reg uops_12_fu_code_2; // @[util.scala:505:22]
reg uops_12_fu_code_3; // @[util.scala:505:22]
reg uops_12_fu_code_4; // @[util.scala:505:22]
reg uops_12_fu_code_5; // @[util.scala:505:22]
reg uops_12_fu_code_6; // @[util.scala:505:22]
reg uops_12_fu_code_7; // @[util.scala:505:22]
reg uops_12_fu_code_8; // @[util.scala:505:22]
reg uops_12_fu_code_9; // @[util.scala:505:22]
reg uops_12_iw_issued; // @[util.scala:505:22]
reg uops_12_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_12_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_12_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_12_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_12_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_12_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_12_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_12_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_12_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_12_br_mask_T_1 = uops_12_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_12_br_tag; // @[util.scala:505:22]
reg [3:0] uops_12_br_type; // @[util.scala:505:22]
reg uops_12_is_sfb; // @[util.scala:505:22]
reg uops_12_is_fence; // @[util.scala:505:22]
reg uops_12_is_fencei; // @[util.scala:505:22]
reg uops_12_is_sfence; // @[util.scala:505:22]
reg uops_12_is_amo; // @[util.scala:505:22]
reg uops_12_is_eret; // @[util.scala:505:22]
reg uops_12_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_12_is_rocc; // @[util.scala:505:22]
reg uops_12_is_mov; // @[util.scala:505:22]
reg [3:0] uops_12_ftq_idx; // @[util.scala:505:22]
reg uops_12_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_12_pc_lob; // @[util.scala:505:22]
reg uops_12_taken; // @[util.scala:505:22]
reg uops_12_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_12_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_12_pimm; // @[util.scala:505:22]
reg [19:0] uops_12_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_12_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_12_op2_sel; // @[util.scala:505:22]
reg uops_12_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_12_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_12_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_12_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_12_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_12_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_12_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_12_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_12_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_12_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_12_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_12_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_12_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_12_fp_ctrl_div; // @[util.scala:505:22]
reg uops_12_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_12_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_12_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_12_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_12_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_12_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_12_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_12_pdst; // @[util.scala:505:22]
reg [5:0] uops_12_prs1; // @[util.scala:505:22]
reg [5:0] uops_12_prs2; // @[util.scala:505:22]
reg [5:0] uops_12_prs3; // @[util.scala:505:22]
reg [3:0] uops_12_ppred; // @[util.scala:505:22]
reg uops_12_prs1_busy; // @[util.scala:505:22]
reg uops_12_prs2_busy; // @[util.scala:505:22]
reg uops_12_prs3_busy; // @[util.scala:505:22]
reg uops_12_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_12_stale_pdst; // @[util.scala:505:22]
reg uops_12_exception; // @[util.scala:505:22]
reg [63:0] uops_12_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_12_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_12_mem_size; // @[util.scala:505:22]
reg uops_12_mem_signed; // @[util.scala:505:22]
reg uops_12_uses_ldq; // @[util.scala:505:22]
reg uops_12_uses_stq; // @[util.scala:505:22]
reg uops_12_is_unique; // @[util.scala:505:22]
reg uops_12_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_12_csr_cmd; // @[util.scala:505:22]
reg uops_12_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_12_ldst; // @[util.scala:505:22]
reg [5:0] uops_12_lrs1; // @[util.scala:505:22]
reg [5:0] uops_12_lrs2; // @[util.scala:505:22]
reg [5:0] uops_12_lrs3; // @[util.scala:505:22]
reg [1:0] uops_12_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_12_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_12_lrs2_rtype; // @[util.scala:505:22]
reg uops_12_frs3_en; // @[util.scala:505:22]
reg uops_12_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_12_fcn_op; // @[util.scala:505:22]
reg uops_12_fp_val; // @[util.scala:505:22]
reg [2:0] uops_12_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_12_fp_typ; // @[util.scala:505:22]
reg uops_12_xcpt_pf_if; // @[util.scala:505:22]
reg uops_12_xcpt_ae_if; // @[util.scala:505:22]
reg uops_12_xcpt_ma_if; // @[util.scala:505:22]
reg uops_12_bp_debug_if; // @[util.scala:505:22]
reg uops_12_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_12_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_12_debug_tsrc; // @[util.scala:505:22]
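// Queue entry 13: micro-op payload registers.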
reg [31:0] uops_13_inst; // @[util.scala:505:22]
reg [31:0] uops_13_debug_inst; // @[util.scala:505:22]
reg uops_13_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_13_debug_pc; // @[util.scala:505:22]
reg uops_13_iq_type_0; // @[util.scala:505:22]
reg uops_13_iq_type_1; // @[util.scala:505:22]
reg uops_13_iq_type_2; // @[util.scala:505:22]
reg uops_13_iq_type_3; // @[util.scala:505:22]
reg uops_13_fu_code_0; // @[util.scala:505:22]
reg uops_13_fu_code_1; // @[util.scala:505:22]
reg uops_13_fu_code_2; // @[util.scala:505:22]
reg uops_13_fu_code_3; // @[util.scala:505:22]
reg uops_13_fu_code_4; // @[util.scala:505:22]
reg uops_13_fu_code_5; // @[util.scala:505:22]
reg uops_13_fu_code_6; // @[util.scala:505:22]
reg uops_13_fu_code_7; // @[util.scala:505:22]
reg uops_13_fu_code_8; // @[util.scala:505:22]
reg uops_13_fu_code_9; // @[util.scala:505:22]
reg uops_13_iw_issued; // @[util.scala:505:22]
reg uops_13_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_13_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_13_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_13_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_13_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_13_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_13_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_13_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_13_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_13_br_mask_T_1 = uops_13_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_13_br_tag; // @[util.scala:505:22]
reg [3:0] uops_13_br_type; // @[util.scala:505:22]
reg uops_13_is_sfb; // @[util.scala:505:22]
reg uops_13_is_fence; // @[util.scala:505:22]
reg uops_13_is_fencei; // @[util.scala:505:22]
reg uops_13_is_sfence; // @[util.scala:505:22]
reg uops_13_is_amo; // @[util.scala:505:22]
reg uops_13_is_eret; // @[util.scala:505:22]
reg uops_13_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_13_is_rocc; // @[util.scala:505:22]
reg uops_13_is_mov; // @[util.scala:505:22]
reg [3:0] uops_13_ftq_idx; // @[util.scala:505:22]
reg uops_13_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_13_pc_lob; // @[util.scala:505:22]
reg uops_13_taken; // @[util.scala:505:22]
reg uops_13_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_13_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_13_pimm; // @[util.scala:505:22]
reg [19:0] uops_13_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_13_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_13_op2_sel; // @[util.scala:505:22]
reg uops_13_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_13_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_13_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_13_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_13_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_13_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_13_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_13_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_13_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_13_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_13_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_13_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_13_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_13_fp_ctrl_div; // @[util.scala:505:22]
reg uops_13_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_13_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_13_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_13_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_13_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_13_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_13_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_13_pdst; // @[util.scala:505:22]
reg [5:0] uops_13_prs1; // @[util.scala:505:22]
reg [5:0] uops_13_prs2; // @[util.scala:505:22]
reg [5:0] uops_13_prs3; // @[util.scala:505:22]
reg [3:0] uops_13_ppred; // @[util.scala:505:22]
reg uops_13_prs1_busy; // @[util.scala:505:22]
reg uops_13_prs2_busy; // @[util.scala:505:22]
reg uops_13_prs3_busy; // @[util.scala:505:22]
reg uops_13_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_13_stale_pdst; // @[util.scala:505:22]
reg uops_13_exception; // @[util.scala:505:22]
reg [63:0] uops_13_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_13_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_13_mem_size; // @[util.scala:505:22]
reg uops_13_mem_signed; // @[util.scala:505:22]
reg uops_13_uses_ldq; // @[util.scala:505:22]
reg uops_13_uses_stq; // @[util.scala:505:22]
reg uops_13_is_unique; // @[util.scala:505:22]
reg uops_13_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_13_csr_cmd; // @[util.scala:505:22]
reg uops_13_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_13_ldst; // @[util.scala:505:22]
reg [5:0] uops_13_lrs1; // @[util.scala:505:22]
reg [5:0] uops_13_lrs2; // @[util.scala:505:22]
reg [5:0] uops_13_lrs3; // @[util.scala:505:22]
reg [1:0] uops_13_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_13_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_13_lrs2_rtype; // @[util.scala:505:22]
reg uops_13_frs3_en; // @[util.scala:505:22]
reg uops_13_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_13_fcn_op; // @[util.scala:505:22]
reg uops_13_fp_val; // @[util.scala:505:22]
reg [2:0] uops_13_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_13_fp_typ; // @[util.scala:505:22]
reg uops_13_xcpt_pf_if; // @[util.scala:505:22]
reg uops_13_xcpt_ae_if; // @[util.scala:505:22]
reg uops_13_xcpt_ma_if; // @[util.scala:505:22]
reg uops_13_bp_debug_if; // @[util.scala:505:22]
reg uops_13_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_13_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_13_debug_tsrc; // @[util.scala:505:22]
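// Queue entry 14: micro-op payload registers (last entry of the 15-deep queue).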
reg [31:0] uops_14_inst; // @[util.scala:505:22]
reg [31:0] uops_14_debug_inst; // @[util.scala:505:22]
reg uops_14_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_14_debug_pc; // @[util.scala:505:22]
reg uops_14_iq_type_0; // @[util.scala:505:22]
reg uops_14_iq_type_1; // @[util.scala:505:22]
reg uops_14_iq_type_2; // @[util.scala:505:22]
reg uops_14_iq_type_3; // @[util.scala:505:22]
reg uops_14_fu_code_0; // @[util.scala:505:22]
reg uops_14_fu_code_1; // @[util.scala:505:22]
reg uops_14_fu_code_2; // @[util.scala:505:22]
reg uops_14_fu_code_3; // @[util.scala:505:22]
reg uops_14_fu_code_4; // @[util.scala:505:22]
reg uops_14_fu_code_5; // @[util.scala:505:22]
reg uops_14_fu_code_6; // @[util.scala:505:22]
reg uops_14_fu_code_7; // @[util.scala:505:22]
reg uops_14_fu_code_8; // @[util.scala:505:22]
reg uops_14_fu_code_9; // @[util.scala:505:22]
reg uops_14_iw_issued; // @[util.scala:505:22]
reg uops_14_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_14_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_14_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_14_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_14_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_14_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_14_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_14_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_14_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_14_br_mask_T_1 = uops_14_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_14_br_tag; // @[util.scala:505:22]
reg [3:0] uops_14_br_type; // @[util.scala:505:22]
reg uops_14_is_sfb; // @[util.scala:505:22]
reg uops_14_is_fence; // @[util.scala:505:22]
reg uops_14_is_fencei; // @[util.scala:505:22]
reg uops_14_is_sfence; // @[util.scala:505:22]
reg uops_14_is_amo; // @[util.scala:505:22]
reg uops_14_is_eret; // @[util.scala:505:22]
reg uops_14_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_14_is_rocc; // @[util.scala:505:22]
reg uops_14_is_mov; // @[util.scala:505:22]
reg [3:0] uops_14_ftq_idx; // @[util.scala:505:22]
reg uops_14_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_14_pc_lob; // @[util.scala:505:22]
reg uops_14_taken; // @[util.scala:505:22]
reg uops_14_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_14_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_14_pimm; // @[util.scala:505:22]
reg [19:0] uops_14_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_14_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_14_op2_sel; // @[util.scala:505:22]
reg uops_14_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_14_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_14_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_14_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_14_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_14_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_14_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_14_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_14_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_14_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_14_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_14_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_14_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_14_fp_ctrl_div; // @[util.scala:505:22]
reg uops_14_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_14_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_14_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_14_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_14_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_14_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_14_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_14_pdst; // @[util.scala:505:22]
reg [5:0] uops_14_prs1; // @[util.scala:505:22]
reg [5:0] uops_14_prs2; // @[util.scala:505:22]
reg [5:0] uops_14_prs3; // @[util.scala:505:22]
reg [3:0] uops_14_ppred; // @[util.scala:505:22]
reg uops_14_prs1_busy; // @[util.scala:505:22]
reg uops_14_prs2_busy; // @[util.scala:505:22]
reg uops_14_prs3_busy; // @[util.scala:505:22]
reg uops_14_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_14_stale_pdst; // @[util.scala:505:22]
reg uops_14_exception; // @[util.scala:505:22]
reg [63:0] uops_14_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_14_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_14_mem_size; // @[util.scala:505:22]
reg uops_14_mem_signed; // @[util.scala:505:22]
reg uops_14_uses_ldq; // @[util.scala:505:22]
reg uops_14_uses_stq; // @[util.scala:505:22]
reg uops_14_is_unique; // @[util.scala:505:22]
reg uops_14_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_14_csr_cmd; // @[util.scala:505:22]
reg uops_14_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_14_ldst; // @[util.scala:505:22]
reg [5:0] uops_14_lrs1; // @[util.scala:505:22]
reg [5:0] uops_14_lrs2; // @[util.scala:505:22]
reg [5:0] uops_14_lrs3; // @[util.scala:505:22]
reg [1:0] uops_14_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_14_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_14_lrs2_rtype; // @[util.scala:505:22]
reg uops_14_frs3_en; // @[util.scala:505:22]
reg uops_14_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_14_fcn_op; // @[util.scala:505:22]
reg uops_14_fp_val; // @[util.scala:505:22]
reg [2:0] uops_14_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_14_fp_typ; // @[util.scala:505:22]
reg uops_14_xcpt_pf_if; // @[util.scala:505:22]
reg uops_14_xcpt_ae_if; // @[util.scala:505:22]
reg uops_14_xcpt_ma_if; // @[util.scala:505:22]
reg uops_14_bp_debug_if; // @[util.scala:505:22]
reg uops_14_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_14_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_14_debug_tsrc; // @[util.scala:505:22]
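// Circular-buffer bookkeeping: enqueue/dequeue pointers plus the maybe_full flag that
// disambiguates full from empty when the two pointers match.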
reg [3:0] enq_ptr_value; // @[Counter.scala:61:40]
reg [3:0] deq_ptr_value; // @[Counter.scala:61:40]
reg maybe_full; // @[util.scala:509:29]
wire ptr_match = enq_ptr_value == deq_ptr_value; // @[Counter.scala:61:40]
wire _io_empty_T = ~maybe_full; // @[util.scala:509:29, :512:30]
assign _io_empty_T_1 = ptr_match & _io_empty_T; // @[util.scala:511:35, :512:{27,30}]
assign io_empty_0 = _io_empty_T_1; // @[util.scala:458:7, :512:27]
wire full = ptr_match & maybe_full; // @[util.scala:509:29, :511:35, :513:26]
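// Enqueue fires when the producer handshake completes; dequeue fires when the consumer is
// ready (or the head entry has already been invalidated) and the queue is not empty.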
wire _do_enq_T = io_enq_ready_0 & io_enq_valid_0; // @[Decoupled.scala:51:35]
wire _do_enq_T_5 = _do_enq_T; // @[Decoupled.scala:51:35]
wire _do_enq_T_8 = _do_enq_T_5; // @[util.scala:514:{39,99}]
wire do_enq = _do_enq_T_8; // @[util.scala:514:{26,99}]
wire [15:0] _GEN = {{valids_0}, {valids_14}, {valids_13}, {valids_12}, {valids_11}, {valids_10}, {valids_9}, {valids_8}, {valids_7}, {valids_6}, {valids_5}, {valids_4}, {valids_3}, {valids_2}, {valids_1}, {valids_0}}; // @[util.scala:504:26, :515:44]
wire _GEN_0 = _GEN[deq_ptr_value]; // @[Counter.scala:61:40]
wire _do_deq_T = ~_GEN_0; // @[util.scala:515:44]
wire _do_deq_T_1 = io_deq_ready_0 | _do_deq_T; // @[util.scala:458:7, :515:{41,44}]
wire _do_deq_T_2 = ~io_empty_0; // @[util.scala:458:7, :515:71]
wire _do_deq_T_3 = _do_deq_T_1 & _do_deq_T_2; // @[util.scala:515:{41,68,71}]
wire do_deq = _do_deq_T_3; // @[util.scala:515:{26,68}]
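// Next-state wires for the per-entry valid bits; the _T_4 terms are produced earlier in the
// module, presumably by the enqueue/branch-kill update path.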
wire _valids_0_T_7 = _valids_0_T_4; // @[util.scala:520:{31,80}]
wire _valids_1_T_7 = _valids_1_T_4; // @[util.scala:520:{31,80}]
wire _valids_2_T_7 = _valids_2_T_4; // @[util.scala:520:{31,80}]
wire _valids_3_T_7 = _valids_3_T_4; // @[util.scala:520:{31,80}]
wire _valids_4_T_7 = _valids_4_T_4; // @[util.scala:520:{31,80}]
wire _valids_5_T_7 = _valids_5_T_4; // @[util.scala:520:{31,80}]
wire _valids_6_T_7 = _valids_6_T_4; // @[util.scala:520:{31,80}]
wire _valids_7_T_7 = _valids_7_T_4; // @[util.scala:520:{31,80}]
wire _valids_8_T_7 = _valids_8_T_4; // @[util.scala:520:{31,80}]
wire _valids_9_T_7 = _valids_9_T_4; // @[util.scala:520:{31,80}]
wire _valids_10_T_7 = _valids_10_T_4; // @[util.scala:520:{31,80}]
wire _valids_11_T_7 = _valids_11_T_4; // @[util.scala:520:{31,80}]
wire _valids_12_T_7 = _valids_12_T_4; // @[util.scala:520:{31,80}]
wire _valids_13_T_7 = _valids_13_T_4; // @[util.scala:520:{31,80}]
wire _valids_14_T_7 = _valids_14_T_4; // @[util.scala:520:{31,80}]
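// Pointer increment with wrap: both counters roll over after reaching 14, giving the
// queue its depth of 15 entries.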
wire wrap = enq_ptr_value == 4'hE; // @[Counter.scala:61:40, :73:24]
wire [4:0] _GEN_1 = {1'h0, enq_ptr_value}; // @[Counter.scala:61:40, :77:24]
wire [4:0] _value_T = _GEN_1 + 5'h1; // @[Counter.scala:77:24]
wire [3:0] _value_T_1 = _value_T[3:0]; // @[Counter.scala:77:24]
wire wrap_1 = deq_ptr_value == 4'hE; // @[Counter.scala:61:40, :73:24]
wire [4:0] _GEN_2 = {1'h0, deq_ptr_value}; // @[Counter.scala:61:40, :77:24]
wire [4:0] _value_T_2 = _GEN_2 + 5'h1; // @[Counter.scala:77:24]
wire [3:0] _value_T_3 = _value_T_2[3:0]; // @[Counter.scala:77:24]
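// Handshake and dequeue outputs: enq is ready whenever the queue is not full, and the deq
// payload is a combinational read of the entry selected by deq_ptr_value.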
assign _io_enq_ready_T = ~full; // @[util.scala:513:26, :543:21]
assign io_enq_ready_0 = _io_enq_ready_T; // @[util.scala:458:7, :543:21]
assign io_deq_bits_uop_inst_0 = out_uop_inst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_debug_inst_0 = out_uop_debug_inst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_rvc_0 = out_uop_is_rvc; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_debug_pc_0 = out_uop_debug_pc; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iq_type_0_0 = out_uop_iq_type_0; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iq_type_1_0 = out_uop_iq_type_1; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iq_type_2_0 = out_uop_iq_type_2; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iq_type_3_0 = out_uop_iq_type_3; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_0_0 = out_uop_fu_code_0; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_1_0 = out_uop_fu_code_1; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_2_0 = out_uop_fu_code_2; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_3_0 = out_uop_fu_code_3; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_4_0 = out_uop_fu_code_4; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_5_0 = out_uop_fu_code_5; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_6_0 = out_uop_fu_code_6; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_7_0 = out_uop_fu_code_7; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_8_0 = out_uop_fu_code_8; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_9_0 = out_uop_fu_code_9; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_issued_0 = out_uop_iw_issued; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_issued_partial_agen_0 = out_uop_iw_issued_partial_agen; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_issued_partial_dgen_0 = out_uop_iw_issued_partial_dgen; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_p1_speculative_child_0 = out_uop_iw_p1_speculative_child; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_p2_speculative_child_0 = out_uop_iw_p2_speculative_child; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_p1_bypass_hint_0 = out_uop_iw_p1_bypass_hint; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_p2_bypass_hint_0 = out_uop_iw_p2_bypass_hint; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_p3_bypass_hint_0 = out_uop_iw_p3_bypass_hint; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_dis_col_sel_0 = out_uop_dis_col_sel; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_br_mask_0 = out_uop_br_mask; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_br_tag_0 = out_uop_br_tag; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_br_type_0 = out_uop_br_type; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_sfb_0 = out_uop_is_sfb; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_fence_0 = out_uop_is_fence; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_fencei_0 = out_uop_is_fencei; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_sfence_0 = out_uop_is_sfence; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_amo_0 = out_uop_is_amo; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_eret_0 = out_uop_is_eret; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_sys_pc2epc_0 = out_uop_is_sys_pc2epc; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_rocc_0 = out_uop_is_rocc; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_mov_0 = out_uop_is_mov; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_ftq_idx_0 = out_uop_ftq_idx; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_edge_inst_0 = out_uop_edge_inst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_pc_lob_0 = out_uop_pc_lob; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_taken_0 = out_uop_taken; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_imm_rename_0 = out_uop_imm_rename; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_imm_sel_0 = out_uop_imm_sel; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_pimm_0 = out_uop_pimm; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_imm_packed_0 = out_uop_imm_packed; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_op1_sel_0 = out_uop_op1_sel; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_op2_sel_0 = out_uop_op2_sel; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_ldst_0 = out_uop_fp_ctrl_ldst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_wen_0 = out_uop_fp_ctrl_wen; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_ren1_0 = out_uop_fp_ctrl_ren1; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_ren2_0 = out_uop_fp_ctrl_ren2; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_ren3_0 = out_uop_fp_ctrl_ren3; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_swap12_0 = out_uop_fp_ctrl_swap12; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_swap23_0 = out_uop_fp_ctrl_swap23; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_typeTagIn_0 = out_uop_fp_ctrl_typeTagIn; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_typeTagOut_0 = out_uop_fp_ctrl_typeTagOut; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_fromint_0 = out_uop_fp_ctrl_fromint; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_toint_0 = out_uop_fp_ctrl_toint; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_fastpipe_0 = out_uop_fp_ctrl_fastpipe; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_fma_0 = out_uop_fp_ctrl_fma; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_div_0 = out_uop_fp_ctrl_div; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_sqrt_0 = out_uop_fp_ctrl_sqrt; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_wflags_0 = out_uop_fp_ctrl_wflags; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_vec_0 = out_uop_fp_ctrl_vec; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_rob_idx_0 = out_uop_rob_idx; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_ldq_idx_0 = out_uop_ldq_idx; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_stq_idx_0 = out_uop_stq_idx; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_rxq_idx_0 = out_uop_rxq_idx; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_pdst_0 = out_uop_pdst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_prs1_0 = out_uop_prs1; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_prs2_0 = out_uop_prs2; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_prs3_0 = out_uop_prs3; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_ppred_0 = out_uop_ppred; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_prs1_busy_0 = out_uop_prs1_busy; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_prs2_busy_0 = out_uop_prs2_busy; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_prs3_busy_0 = out_uop_prs3_busy; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_ppred_busy_0 = out_uop_ppred_busy; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_stale_pdst_0 = out_uop_stale_pdst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_exception_0 = out_uop_exception; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_exc_cause_0 = out_uop_exc_cause; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_mem_cmd_0 = out_uop_mem_cmd; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_mem_size_0 = out_uop_mem_size; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_mem_signed_0 = out_uop_mem_signed; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_uses_ldq_0 = out_uop_uses_ldq; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_uses_stq_0 = out_uop_uses_stq; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_unique_0 = out_uop_is_unique; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_flush_on_commit_0 = out_uop_flush_on_commit; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_csr_cmd_0 = out_uop_csr_cmd; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_ldst_is_rs1_0 = out_uop_ldst_is_rs1; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_ldst_0 = out_uop_ldst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_lrs1_0 = out_uop_lrs1; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_lrs2_0 = out_uop_lrs2; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_lrs3_0 = out_uop_lrs3; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_dst_rtype_0 = out_uop_dst_rtype; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_lrs1_rtype_0 = out_uop_lrs1_rtype; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_lrs2_rtype_0 = out_uop_lrs2_rtype; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_frs3_en_0 = out_uop_frs3_en; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fcn_dw_0 = out_uop_fcn_dw; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fcn_op_0 = out_uop_fcn_op; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_val_0 = out_uop_fp_val; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_rm_0 = out_uop_fp_rm; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_typ_0 = out_uop_fp_typ; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_xcpt_pf_if_0 = out_uop_xcpt_pf_if; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_xcpt_ae_if_0 = out_uop_xcpt_ae_if; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_xcpt_ma_if_0 = out_uop_xcpt_ma_if; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_bp_debug_if_0 = out_uop_bp_debug_if; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_bp_xcpt_if_0 = out_uop_bp_xcpt_if; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_debug_fsrc_0 = out_uop_debug_fsrc; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_debug_tsrc_0 = out_uop_debug_tsrc; // @[util.scala:458:7, :545:19]
assign io_deq_bits_addr_0 = out_addr; // @[util.scala:458:7, :545:19]
assign io_deq_bits_data_0 = out_data; // @[util.scala:458:7, :545:19]
assign io_deq_bits_is_hella_0 = out_is_hella; // @[util.scala:458:7, :545:19]
assign io_deq_bits_tag_match_0 = out_tag_match; // @[util.scala:458:7, :545:19]
assign io_deq_bits_old_meta_coh_state_0 = out_old_meta_coh_state; // @[util.scala:458:7, :545:19]
assign io_deq_bits_old_meta_tag_0 = out_old_meta_tag; // @[util.scala:458:7, :545:19]
assign io_deq_bits_way_en_0 = out_way_en; // @[util.scala:458:7, :545:19]
assign io_deq_bits_sdq_id_0 = out_sdq_id; // @[util.scala:458:7, :545:19]
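// Read multiplexers for the dequeue side: each 16-way _GEN_* table selects one field of the
// head entry by deq_ptr_value. Slot 15 aliases entry 0 only because the 4-bit pointer never
// reaches 15 for this 15-entry queue, so that slot is a don't-care.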
wire [15:0][31:0] _GEN_3 = {{uops_0_inst}, {uops_14_inst}, {uops_13_inst}, {uops_12_inst}, {uops_11_inst}, {uops_10_inst}, {uops_9_inst}, {uops_8_inst}, {uops_7_inst}, {uops_6_inst}, {uops_5_inst}, {uops_4_inst}, {uops_3_inst}, {uops_2_inst}, {uops_1_inst}, {uops_0_inst}}; // @[util.scala:505:22, :547:21]
assign out_uop_inst = _GEN_3[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][31:0] _GEN_4 = {{uops_0_debug_inst}, {uops_14_debug_inst}, {uops_13_debug_inst}, {uops_12_debug_inst}, {uops_11_debug_inst}, {uops_10_debug_inst}, {uops_9_debug_inst}, {uops_8_debug_inst}, {uops_7_debug_inst}, {uops_6_debug_inst}, {uops_5_debug_inst}, {uops_4_debug_inst}, {uops_3_debug_inst}, {uops_2_debug_inst}, {uops_1_debug_inst}, {uops_0_debug_inst}}; // @[util.scala:505:22, :547:21]
assign out_uop_debug_inst = _GEN_4[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_5 = {{uops_0_is_rvc}, {uops_14_is_rvc}, {uops_13_is_rvc}, {uops_12_is_rvc}, {uops_11_is_rvc}, {uops_10_is_rvc}, {uops_9_is_rvc}, {uops_8_is_rvc}, {uops_7_is_rvc}, {uops_6_is_rvc}, {uops_5_is_rvc}, {uops_4_is_rvc}, {uops_3_is_rvc}, {uops_2_is_rvc}, {uops_1_is_rvc}, {uops_0_is_rvc}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_rvc = _GEN_5[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][33:0] _GEN_6 = {{uops_0_debug_pc}, {uops_14_debug_pc}, {uops_13_debug_pc}, {uops_12_debug_pc}, {uops_11_debug_pc}, {uops_10_debug_pc}, {uops_9_debug_pc}, {uops_8_debug_pc}, {uops_7_debug_pc}, {uops_6_debug_pc}, {uops_5_debug_pc}, {uops_4_debug_pc}, {uops_3_debug_pc}, {uops_2_debug_pc}, {uops_1_debug_pc}, {uops_0_debug_pc}}; // @[util.scala:505:22, :547:21]
assign out_uop_debug_pc = _GEN_6[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_7 = {{uops_0_iq_type_0}, {uops_14_iq_type_0}, {uops_13_iq_type_0}, {uops_12_iq_type_0}, {uops_11_iq_type_0}, {uops_10_iq_type_0}, {uops_9_iq_type_0}, {uops_8_iq_type_0}, {uops_7_iq_type_0}, {uops_6_iq_type_0}, {uops_5_iq_type_0}, {uops_4_iq_type_0}, {uops_3_iq_type_0}, {uops_2_iq_type_0}, {uops_1_iq_type_0}, {uops_0_iq_type_0}}; // @[util.scala:505:22, :547:21]
assign out_uop_iq_type_0 = _GEN_7[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_8 = {{uops_0_iq_type_1}, {uops_14_iq_type_1}, {uops_13_iq_type_1}, {uops_12_iq_type_1}, {uops_11_iq_type_1}, {uops_10_iq_type_1}, {uops_9_iq_type_1}, {uops_8_iq_type_1}, {uops_7_iq_type_1}, {uops_6_iq_type_1}, {uops_5_iq_type_1}, {uops_4_iq_type_1}, {uops_3_iq_type_1}, {uops_2_iq_type_1}, {uops_1_iq_type_1}, {uops_0_iq_type_1}}; // @[util.scala:505:22, :547:21]
assign out_uop_iq_type_1 = _GEN_8[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_9 = {{uops_0_iq_type_2}, {uops_14_iq_type_2}, {uops_13_iq_type_2}, {uops_12_iq_type_2}, {uops_11_iq_type_2}, {uops_10_iq_type_2}, {uops_9_iq_type_2}, {uops_8_iq_type_2}, {uops_7_iq_type_2}, {uops_6_iq_type_2}, {uops_5_iq_type_2}, {uops_4_iq_type_2}, {uops_3_iq_type_2}, {uops_2_iq_type_2}, {uops_1_iq_type_2}, {uops_0_iq_type_2}}; // @[util.scala:505:22, :547:21]
assign out_uop_iq_type_2 = _GEN_9[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_10 = {{uops_0_iq_type_3}, {uops_14_iq_type_3}, {uops_13_iq_type_3}, {uops_12_iq_type_3}, {uops_11_iq_type_3}, {uops_10_iq_type_3}, {uops_9_iq_type_3}, {uops_8_iq_type_3}, {uops_7_iq_type_3}, {uops_6_iq_type_3}, {uops_5_iq_type_3}, {uops_4_iq_type_3}, {uops_3_iq_type_3}, {uops_2_iq_type_3}, {uops_1_iq_type_3}, {uops_0_iq_type_3}}; // @[util.scala:505:22, :547:21]
assign out_uop_iq_type_3 = _GEN_10[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_11 = {{uops_0_fu_code_0}, {uops_14_fu_code_0}, {uops_13_fu_code_0}, {uops_12_fu_code_0}, {uops_11_fu_code_0}, {uops_10_fu_code_0}, {uops_9_fu_code_0}, {uops_8_fu_code_0}, {uops_7_fu_code_0}, {uops_6_fu_code_0}, {uops_5_fu_code_0}, {uops_4_fu_code_0}, {uops_3_fu_code_0}, {uops_2_fu_code_0}, {uops_1_fu_code_0}, {uops_0_fu_code_0}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_0 = _GEN_11[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_12 = {{uops_0_fu_code_1}, {uops_14_fu_code_1}, {uops_13_fu_code_1}, {uops_12_fu_code_1}, {uops_11_fu_code_1}, {uops_10_fu_code_1}, {uops_9_fu_code_1}, {uops_8_fu_code_1}, {uops_7_fu_code_1}, {uops_6_fu_code_1}, {uops_5_fu_code_1}, {uops_4_fu_code_1}, {uops_3_fu_code_1}, {uops_2_fu_code_1}, {uops_1_fu_code_1}, {uops_0_fu_code_1}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_1 = _GEN_12[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_13 = {{uops_0_fu_code_2}, {uops_14_fu_code_2}, {uops_13_fu_code_2}, {uops_12_fu_code_2}, {uops_11_fu_code_2}, {uops_10_fu_code_2}, {uops_9_fu_code_2}, {uops_8_fu_code_2}, {uops_7_fu_code_2}, {uops_6_fu_code_2}, {uops_5_fu_code_2}, {uops_4_fu_code_2}, {uops_3_fu_code_2}, {uops_2_fu_code_2}, {uops_1_fu_code_2}, {uops_0_fu_code_2}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_2 = _GEN_13[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_14 = {{uops_0_fu_code_3}, {uops_14_fu_code_3}, {uops_13_fu_code_3}, {uops_12_fu_code_3}, {uops_11_fu_code_3}, {uops_10_fu_code_3}, {uops_9_fu_code_3}, {uops_8_fu_code_3}, {uops_7_fu_code_3}, {uops_6_fu_code_3}, {uops_5_fu_code_3}, {uops_4_fu_code_3}, {uops_3_fu_code_3}, {uops_2_fu_code_3}, {uops_1_fu_code_3}, {uops_0_fu_code_3}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_3 = _GEN_14[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_15 = {{uops_0_fu_code_4}, {uops_14_fu_code_4}, {uops_13_fu_code_4}, {uops_12_fu_code_4}, {uops_11_fu_code_4}, {uops_10_fu_code_4}, {uops_9_fu_code_4}, {uops_8_fu_code_4}, {uops_7_fu_code_4}, {uops_6_fu_code_4}, {uops_5_fu_code_4}, {uops_4_fu_code_4}, {uops_3_fu_code_4}, {uops_2_fu_code_4}, {uops_1_fu_code_4}, {uops_0_fu_code_4}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_4 = _GEN_15[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_16 = {{uops_0_fu_code_5}, {uops_14_fu_code_5}, {uops_13_fu_code_5}, {uops_12_fu_code_5}, {uops_11_fu_code_5}, {uops_10_fu_code_5}, {uops_9_fu_code_5}, {uops_8_fu_code_5}, {uops_7_fu_code_5}, {uops_6_fu_code_5}, {uops_5_fu_code_5}, {uops_4_fu_code_5}, {uops_3_fu_code_5}, {uops_2_fu_code_5}, {uops_1_fu_code_5}, {uops_0_fu_code_5}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_5 = _GEN_16[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_17 = {{uops_0_fu_code_6}, {uops_14_fu_code_6}, {uops_13_fu_code_6}, {uops_12_fu_code_6}, {uops_11_fu_code_6}, {uops_10_fu_code_6}, {uops_9_fu_code_6}, {uops_8_fu_code_6}, {uops_7_fu_code_6}, {uops_6_fu_code_6}, {uops_5_fu_code_6}, {uops_4_fu_code_6}, {uops_3_fu_code_6}, {uops_2_fu_code_6}, {uops_1_fu_code_6}, {uops_0_fu_code_6}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_6 = _GEN_17[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_18 = {{uops_0_fu_code_7}, {uops_14_fu_code_7}, {uops_13_fu_code_7}, {uops_12_fu_code_7}, {uops_11_fu_code_7}, {uops_10_fu_code_7}, {uops_9_fu_code_7}, {uops_8_fu_code_7}, {uops_7_fu_code_7}, {uops_6_fu_code_7}, {uops_5_fu_code_7}, {uops_4_fu_code_7}, {uops_3_fu_code_7}, {uops_2_fu_code_7}, {uops_1_fu_code_7}, {uops_0_fu_code_7}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_7 = _GEN_18[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_19 = {{uops_0_fu_code_8}, {uops_14_fu_code_8}, {uops_13_fu_code_8}, {uops_12_fu_code_8}, {uops_11_fu_code_8}, {uops_10_fu_code_8}, {uops_9_fu_code_8}, {uops_8_fu_code_8}, {uops_7_fu_code_8}, {uops_6_fu_code_8}, {uops_5_fu_code_8}, {uops_4_fu_code_8}, {uops_3_fu_code_8}, {uops_2_fu_code_8}, {uops_1_fu_code_8}, {uops_0_fu_code_8}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_8 = _GEN_19[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_20 = {{uops_0_fu_code_9}, {uops_14_fu_code_9}, {uops_13_fu_code_9}, {uops_12_fu_code_9}, {uops_11_fu_code_9}, {uops_10_fu_code_9}, {uops_9_fu_code_9}, {uops_8_fu_code_9}, {uops_7_fu_code_9}, {uops_6_fu_code_9}, {uops_5_fu_code_9}, {uops_4_fu_code_9}, {uops_3_fu_code_9}, {uops_2_fu_code_9}, {uops_1_fu_code_9}, {uops_0_fu_code_9}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_9 = _GEN_20[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_21 = {{uops_0_iw_issued}, {uops_14_iw_issued}, {uops_13_iw_issued}, {uops_12_iw_issued}, {uops_11_iw_issued}, {uops_10_iw_issued}, {uops_9_iw_issued}, {uops_8_iw_issued}, {uops_7_iw_issued}, {uops_6_iw_issued}, {uops_5_iw_issued}, {uops_4_iw_issued}, {uops_3_iw_issued}, {uops_2_iw_issued}, {uops_1_iw_issued}, {uops_0_iw_issued}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_issued = _GEN_21[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_22 = {{uops_0_iw_issued_partial_agen}, {uops_14_iw_issued_partial_agen}, {uops_13_iw_issued_partial_agen}, {uops_12_iw_issued_partial_agen}, {uops_11_iw_issued_partial_agen}, {uops_10_iw_issued_partial_agen}, {uops_9_iw_issued_partial_agen}, {uops_8_iw_issued_partial_agen}, {uops_7_iw_issued_partial_agen}, {uops_6_iw_issued_partial_agen}, {uops_5_iw_issued_partial_agen}, {uops_4_iw_issued_partial_agen}, {uops_3_iw_issued_partial_agen}, {uops_2_iw_issued_partial_agen}, {uops_1_iw_issued_partial_agen}, {uops_0_iw_issued_partial_agen}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_issued_partial_agen = _GEN_22[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_23 = {{uops_0_iw_issued_partial_dgen}, {uops_14_iw_issued_partial_dgen}, {uops_13_iw_issued_partial_dgen}, {uops_12_iw_issued_partial_dgen}, {uops_11_iw_issued_partial_dgen}, {uops_10_iw_issued_partial_dgen}, {uops_9_iw_issued_partial_dgen}, {uops_8_iw_issued_partial_dgen}, {uops_7_iw_issued_partial_dgen}, {uops_6_iw_issued_partial_dgen}, {uops_5_iw_issued_partial_dgen}, {uops_4_iw_issued_partial_dgen}, {uops_3_iw_issued_partial_dgen}, {uops_2_iw_issued_partial_dgen}, {uops_1_iw_issued_partial_dgen}, {uops_0_iw_issued_partial_dgen}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_issued_partial_dgen = _GEN_23[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_24 = {{uops_0_iw_p1_speculative_child}, {uops_14_iw_p1_speculative_child}, {uops_13_iw_p1_speculative_child}, {uops_12_iw_p1_speculative_child}, {uops_11_iw_p1_speculative_child}, {uops_10_iw_p1_speculative_child}, {uops_9_iw_p1_speculative_child}, {uops_8_iw_p1_speculative_child}, {uops_7_iw_p1_speculative_child}, {uops_6_iw_p1_speculative_child}, {uops_5_iw_p1_speculative_child}, {uops_4_iw_p1_speculative_child}, {uops_3_iw_p1_speculative_child}, {uops_2_iw_p1_speculative_child}, {uops_1_iw_p1_speculative_child}, {uops_0_iw_p1_speculative_child}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_p1_speculative_child = _GEN_24[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_25 = {{uops_0_iw_p2_speculative_child}, {uops_14_iw_p2_speculative_child}, {uops_13_iw_p2_speculative_child}, {uops_12_iw_p2_speculative_child}, {uops_11_iw_p2_speculative_child}, {uops_10_iw_p2_speculative_child}, {uops_9_iw_p2_speculative_child}, {uops_8_iw_p2_speculative_child}, {uops_7_iw_p2_speculative_child}, {uops_6_iw_p2_speculative_child}, {uops_5_iw_p2_speculative_child}, {uops_4_iw_p2_speculative_child}, {uops_3_iw_p2_speculative_child}, {uops_2_iw_p2_speculative_child}, {uops_1_iw_p2_speculative_child}, {uops_0_iw_p2_speculative_child}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_p2_speculative_child = _GEN_25[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_26 = {{uops_0_iw_p1_bypass_hint}, {uops_14_iw_p1_bypass_hint}, {uops_13_iw_p1_bypass_hint}, {uops_12_iw_p1_bypass_hint}, {uops_11_iw_p1_bypass_hint}, {uops_10_iw_p1_bypass_hint}, {uops_9_iw_p1_bypass_hint}, {uops_8_iw_p1_bypass_hint}, {uops_7_iw_p1_bypass_hint}, {uops_6_iw_p1_bypass_hint}, {uops_5_iw_p1_bypass_hint}, {uops_4_iw_p1_bypass_hint}, {uops_3_iw_p1_bypass_hint}, {uops_2_iw_p1_bypass_hint}, {uops_1_iw_p1_bypass_hint}, {uops_0_iw_p1_bypass_hint}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_p1_bypass_hint = _GEN_26[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_27 = {{uops_0_iw_p2_bypass_hint}, {uops_14_iw_p2_bypass_hint}, {uops_13_iw_p2_bypass_hint}, {uops_12_iw_p2_bypass_hint}, {uops_11_iw_p2_bypass_hint}, {uops_10_iw_p2_bypass_hint}, {uops_9_iw_p2_bypass_hint}, {uops_8_iw_p2_bypass_hint}, {uops_7_iw_p2_bypass_hint}, {uops_6_iw_p2_bypass_hint}, {uops_5_iw_p2_bypass_hint}, {uops_4_iw_p2_bypass_hint}, {uops_3_iw_p2_bypass_hint}, {uops_2_iw_p2_bypass_hint}, {uops_1_iw_p2_bypass_hint}, {uops_0_iw_p2_bypass_hint}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_p2_bypass_hint = _GEN_27[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_28 = {{uops_0_iw_p3_bypass_hint}, {uops_14_iw_p3_bypass_hint}, {uops_13_iw_p3_bypass_hint}, {uops_12_iw_p3_bypass_hint}, {uops_11_iw_p3_bypass_hint}, {uops_10_iw_p3_bypass_hint}, {uops_9_iw_p3_bypass_hint}, {uops_8_iw_p3_bypass_hint}, {uops_7_iw_p3_bypass_hint}, {uops_6_iw_p3_bypass_hint}, {uops_5_iw_p3_bypass_hint}, {uops_4_iw_p3_bypass_hint}, {uops_3_iw_p3_bypass_hint}, {uops_2_iw_p3_bypass_hint}, {uops_1_iw_p3_bypass_hint}, {uops_0_iw_p3_bypass_hint}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_p3_bypass_hint = _GEN_28[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_29 = {{uops_0_dis_col_sel}, {uops_14_dis_col_sel}, {uops_13_dis_col_sel}, {uops_12_dis_col_sel}, {uops_11_dis_col_sel}, {uops_10_dis_col_sel}, {uops_9_dis_col_sel}, {uops_8_dis_col_sel}, {uops_7_dis_col_sel}, {uops_6_dis_col_sel}, {uops_5_dis_col_sel}, {uops_4_dis_col_sel}, {uops_3_dis_col_sel}, {uops_2_dis_col_sel}, {uops_1_dis_col_sel}, {uops_0_dis_col_sel}}; // @[util.scala:505:22, :547:21]
assign out_uop_dis_col_sel = _GEN_29[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][3:0] _GEN_30 = {{uops_0_br_mask}, {uops_14_br_mask}, {uops_13_br_mask}, {uops_12_br_mask}, {uops_11_br_mask}, {uops_10_br_mask}, {uops_9_br_mask}, {uops_8_br_mask}, {uops_7_br_mask}, {uops_6_br_mask}, {uops_5_br_mask}, {uops_4_br_mask}, {uops_3_br_mask}, {uops_2_br_mask}, {uops_1_br_mask}, {uops_0_br_mask}}; // @[util.scala:505:22, :547:21]
assign out_uop_br_mask = _GEN_30[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_31 = {{uops_0_br_tag}, {uops_14_br_tag}, {uops_13_br_tag}, {uops_12_br_tag}, {uops_11_br_tag}, {uops_10_br_tag}, {uops_9_br_tag}, {uops_8_br_tag}, {uops_7_br_tag}, {uops_6_br_tag}, {uops_5_br_tag}, {uops_4_br_tag}, {uops_3_br_tag}, {uops_2_br_tag}, {uops_1_br_tag}, {uops_0_br_tag}}; // @[util.scala:505:22, :547:21]
assign out_uop_br_tag = _GEN_31[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][3:0] _GEN_32 = {{uops_0_br_type}, {uops_14_br_type}, {uops_13_br_type}, {uops_12_br_type}, {uops_11_br_type}, {uops_10_br_type}, {uops_9_br_type}, {uops_8_br_type}, {uops_7_br_type}, {uops_6_br_type}, {uops_5_br_type}, {uops_4_br_type}, {uops_3_br_type}, {uops_2_br_type}, {uops_1_br_type}, {uops_0_br_type}}; // @[util.scala:505:22, :547:21]
assign out_uop_br_type = _GEN_32[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_33 = {{uops_0_is_sfb}, {uops_14_is_sfb}, {uops_13_is_sfb}, {uops_12_is_sfb}, {uops_11_is_sfb}, {uops_10_is_sfb}, {uops_9_is_sfb}, {uops_8_is_sfb}, {uops_7_is_sfb}, {uops_6_is_sfb}, {uops_5_is_sfb}, {uops_4_is_sfb}, {uops_3_is_sfb}, {uops_2_is_sfb}, {uops_1_is_sfb}, {uops_0_is_sfb}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_sfb = _GEN_33[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_34 = {{uops_0_is_fence}, {uops_14_is_fence}, {uops_13_is_fence}, {uops_12_is_fence}, {uops_11_is_fence}, {uops_10_is_fence}, {uops_9_is_fence}, {uops_8_is_fence}, {uops_7_is_fence}, {uops_6_is_fence}, {uops_5_is_fence}, {uops_4_is_fence}, {uops_3_is_fence}, {uops_2_is_fence}, {uops_1_is_fence}, {uops_0_is_fence}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_fence = _GEN_34[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_35 = {{uops_0_is_fencei}, {uops_14_is_fencei}, {uops_13_is_fencei}, {uops_12_is_fencei}, {uops_11_is_fencei}, {uops_10_is_fencei}, {uops_9_is_fencei}, {uops_8_is_fencei}, {uops_7_is_fencei}, {uops_6_is_fencei}, {uops_5_is_fencei}, {uops_4_is_fencei}, {uops_3_is_fencei}, {uops_2_is_fencei}, {uops_1_is_fencei}, {uops_0_is_fencei}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_fencei = _GEN_35[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_36 = {{uops_0_is_sfence}, {uops_14_is_sfence}, {uops_13_is_sfence}, {uops_12_is_sfence}, {uops_11_is_sfence}, {uops_10_is_sfence}, {uops_9_is_sfence}, {uops_8_is_sfence}, {uops_7_is_sfence}, {uops_6_is_sfence}, {uops_5_is_sfence}, {uops_4_is_sfence}, {uops_3_is_sfence}, {uops_2_is_sfence}, {uops_1_is_sfence}, {uops_0_is_sfence}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_sfence = _GEN_36[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_37 = {{uops_0_is_amo}, {uops_14_is_amo}, {uops_13_is_amo}, {uops_12_is_amo}, {uops_11_is_amo}, {uops_10_is_amo}, {uops_9_is_amo}, {uops_8_is_amo}, {uops_7_is_amo}, {uops_6_is_amo}, {uops_5_is_amo}, {uops_4_is_amo}, {uops_3_is_amo}, {uops_2_is_amo}, {uops_1_is_amo}, {uops_0_is_amo}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_amo = _GEN_37[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_38 = {{uops_0_is_eret}, {uops_14_is_eret}, {uops_13_is_eret}, {uops_12_is_eret}, {uops_11_is_eret}, {uops_10_is_eret}, {uops_9_is_eret}, {uops_8_is_eret}, {uops_7_is_eret}, {uops_6_is_eret}, {uops_5_is_eret}, {uops_4_is_eret}, {uops_3_is_eret}, {uops_2_is_eret}, {uops_1_is_eret}, {uops_0_is_eret}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_eret = _GEN_38[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_39 = {{uops_0_is_sys_pc2epc}, {uops_14_is_sys_pc2epc}, {uops_13_is_sys_pc2epc}, {uops_12_is_sys_pc2epc}, {uops_11_is_sys_pc2epc}, {uops_10_is_sys_pc2epc}, {uops_9_is_sys_pc2epc}, {uops_8_is_sys_pc2epc}, {uops_7_is_sys_pc2epc}, {uops_6_is_sys_pc2epc}, {uops_5_is_sys_pc2epc}, {uops_4_is_sys_pc2epc}, {uops_3_is_sys_pc2epc}, {uops_2_is_sys_pc2epc}, {uops_1_is_sys_pc2epc}, {uops_0_is_sys_pc2epc}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_sys_pc2epc = _GEN_39[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_40 = {{uops_0_is_rocc}, {uops_14_is_rocc}, {uops_13_is_rocc}, {uops_12_is_rocc}, {uops_11_is_rocc}, {uops_10_is_rocc}, {uops_9_is_rocc}, {uops_8_is_rocc}, {uops_7_is_rocc}, {uops_6_is_rocc}, {uops_5_is_rocc}, {uops_4_is_rocc}, {uops_3_is_rocc}, {uops_2_is_rocc}, {uops_1_is_rocc}, {uops_0_is_rocc}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_rocc = _GEN_40[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_41 = {{uops_0_is_mov}, {uops_14_is_mov}, {uops_13_is_mov}, {uops_12_is_mov}, {uops_11_is_mov}, {uops_10_is_mov}, {uops_9_is_mov}, {uops_8_is_mov}, {uops_7_is_mov}, {uops_6_is_mov}, {uops_5_is_mov}, {uops_4_is_mov}, {uops_3_is_mov}, {uops_2_is_mov}, {uops_1_is_mov}, {uops_0_is_mov}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_mov = _GEN_41[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][3:0] _GEN_42 = {{uops_0_ftq_idx}, {uops_14_ftq_idx}, {uops_13_ftq_idx}, {uops_12_ftq_idx}, {uops_11_ftq_idx}, {uops_10_ftq_idx}, {uops_9_ftq_idx}, {uops_8_ftq_idx}, {uops_7_ftq_idx}, {uops_6_ftq_idx}, {uops_5_ftq_idx}, {uops_4_ftq_idx}, {uops_3_ftq_idx}, {uops_2_ftq_idx}, {uops_1_ftq_idx}, {uops_0_ftq_idx}}; // @[util.scala:505:22, :547:21]
assign out_uop_ftq_idx = _GEN_42[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_43 = {{uops_0_edge_inst}, {uops_14_edge_inst}, {uops_13_edge_inst}, {uops_12_edge_inst}, {uops_11_edge_inst}, {uops_10_edge_inst}, {uops_9_edge_inst}, {uops_8_edge_inst}, {uops_7_edge_inst}, {uops_6_edge_inst}, {uops_5_edge_inst}, {uops_4_edge_inst}, {uops_3_edge_inst}, {uops_2_edge_inst}, {uops_1_edge_inst}, {uops_0_edge_inst}}; // @[util.scala:505:22, :547:21]
assign out_uop_edge_inst = _GEN_43[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_44 = {{uops_0_pc_lob}, {uops_14_pc_lob}, {uops_13_pc_lob}, {uops_12_pc_lob}, {uops_11_pc_lob}, {uops_10_pc_lob}, {uops_9_pc_lob}, {uops_8_pc_lob}, {uops_7_pc_lob}, {uops_6_pc_lob}, {uops_5_pc_lob}, {uops_4_pc_lob}, {uops_3_pc_lob}, {uops_2_pc_lob}, {uops_1_pc_lob}, {uops_0_pc_lob}}; // @[util.scala:505:22, :547:21]
assign out_uop_pc_lob = _GEN_44[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_45 = {{uops_0_taken}, {uops_14_taken}, {uops_13_taken}, {uops_12_taken}, {uops_11_taken}, {uops_10_taken}, {uops_9_taken}, {uops_8_taken}, {uops_7_taken}, {uops_6_taken}, {uops_5_taken}, {uops_4_taken}, {uops_3_taken}, {uops_2_taken}, {uops_1_taken}, {uops_0_taken}}; // @[util.scala:505:22, :547:21]
assign out_uop_taken = _GEN_45[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_46 = {{uops_0_imm_rename}, {uops_14_imm_rename}, {uops_13_imm_rename}, {uops_12_imm_rename}, {uops_11_imm_rename}, {uops_10_imm_rename}, {uops_9_imm_rename}, {uops_8_imm_rename}, {uops_7_imm_rename}, {uops_6_imm_rename}, {uops_5_imm_rename}, {uops_4_imm_rename}, {uops_3_imm_rename}, {uops_2_imm_rename}, {uops_1_imm_rename}, {uops_0_imm_rename}}; // @[util.scala:505:22, :547:21]
assign out_uop_imm_rename = _GEN_46[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][2:0] _GEN_47 = {{uops_0_imm_sel}, {uops_14_imm_sel}, {uops_13_imm_sel}, {uops_12_imm_sel}, {uops_11_imm_sel}, {uops_10_imm_sel}, {uops_9_imm_sel}, {uops_8_imm_sel}, {uops_7_imm_sel}, {uops_6_imm_sel}, {uops_5_imm_sel}, {uops_4_imm_sel}, {uops_3_imm_sel}, {uops_2_imm_sel}, {uops_1_imm_sel}, {uops_0_imm_sel}}; // @[util.scala:505:22, :547:21]
assign out_uop_imm_sel = _GEN_47[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][4:0] _GEN_48 = {{uops_0_pimm}, {uops_14_pimm}, {uops_13_pimm}, {uops_12_pimm}, {uops_11_pimm}, {uops_10_pimm}, {uops_9_pimm}, {uops_8_pimm}, {uops_7_pimm}, {uops_6_pimm}, {uops_5_pimm}, {uops_4_pimm}, {uops_3_pimm}, {uops_2_pimm}, {uops_1_pimm}, {uops_0_pimm}}; // @[util.scala:505:22, :547:21]
assign out_uop_pimm = _GEN_48[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][19:0] _GEN_49 = {{uops_0_imm_packed}, {uops_14_imm_packed}, {uops_13_imm_packed}, {uops_12_imm_packed}, {uops_11_imm_packed}, {uops_10_imm_packed}, {uops_9_imm_packed}, {uops_8_imm_packed}, {uops_7_imm_packed}, {uops_6_imm_packed}, {uops_5_imm_packed}, {uops_4_imm_packed}, {uops_3_imm_packed}, {uops_2_imm_packed}, {uops_1_imm_packed}, {uops_0_imm_packed}}; // @[util.scala:505:22, :547:21]
assign out_uop_imm_packed = _GEN_49[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_50 = {{uops_0_op1_sel}, {uops_14_op1_sel}, {uops_13_op1_sel}, {uops_12_op1_sel}, {uops_11_op1_sel}, {uops_10_op1_sel}, {uops_9_op1_sel}, {uops_8_op1_sel}, {uops_7_op1_sel}, {uops_6_op1_sel}, {uops_5_op1_sel}, {uops_4_op1_sel}, {uops_3_op1_sel}, {uops_2_op1_sel}, {uops_1_op1_sel}, {uops_0_op1_sel}}; // @[util.scala:505:22, :547:21]
assign out_uop_op1_sel = _GEN_50[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][2:0] _GEN_51 = {{uops_0_op2_sel}, {uops_14_op2_sel}, {uops_13_op2_sel}, {uops_12_op2_sel}, {uops_11_op2_sel}, {uops_10_op2_sel}, {uops_9_op2_sel}, {uops_8_op2_sel}, {uops_7_op2_sel}, {uops_6_op2_sel}, {uops_5_op2_sel}, {uops_4_op2_sel}, {uops_3_op2_sel}, {uops_2_op2_sel}, {uops_1_op2_sel}, {uops_0_op2_sel}}; // @[util.scala:505:22, :547:21]
assign out_uop_op2_sel = _GEN_51[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_52 = {{uops_0_fp_ctrl_ldst}, {uops_14_fp_ctrl_ldst}, {uops_13_fp_ctrl_ldst}, {uops_12_fp_ctrl_ldst}, {uops_11_fp_ctrl_ldst}, {uops_10_fp_ctrl_ldst}, {uops_9_fp_ctrl_ldst}, {uops_8_fp_ctrl_ldst}, {uops_7_fp_ctrl_ldst}, {uops_6_fp_ctrl_ldst}, {uops_5_fp_ctrl_ldst}, {uops_4_fp_ctrl_ldst}, {uops_3_fp_ctrl_ldst}, {uops_2_fp_ctrl_ldst}, {uops_1_fp_ctrl_ldst}, {uops_0_fp_ctrl_ldst}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_ldst = _GEN_52[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_53 = {{uops_0_fp_ctrl_wen}, {uops_14_fp_ctrl_wen}, {uops_13_fp_ctrl_wen}, {uops_12_fp_ctrl_wen}, {uops_11_fp_ctrl_wen}, {uops_10_fp_ctrl_wen}, {uops_9_fp_ctrl_wen}, {uops_8_fp_ctrl_wen}, {uops_7_fp_ctrl_wen}, {uops_6_fp_ctrl_wen}, {uops_5_fp_ctrl_wen}, {uops_4_fp_ctrl_wen}, {uops_3_fp_ctrl_wen}, {uops_2_fp_ctrl_wen}, {uops_1_fp_ctrl_wen}, {uops_0_fp_ctrl_wen}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_wen = _GEN_53[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_54 = {{uops_0_fp_ctrl_ren1}, {uops_14_fp_ctrl_ren1}, {uops_13_fp_ctrl_ren1}, {uops_12_fp_ctrl_ren1}, {uops_11_fp_ctrl_ren1}, {uops_10_fp_ctrl_ren1}, {uops_9_fp_ctrl_ren1}, {uops_8_fp_ctrl_ren1}, {uops_7_fp_ctrl_ren1}, {uops_6_fp_ctrl_ren1}, {uops_5_fp_ctrl_ren1}, {uops_4_fp_ctrl_ren1}, {uops_3_fp_ctrl_ren1}, {uops_2_fp_ctrl_ren1}, {uops_1_fp_ctrl_ren1}, {uops_0_fp_ctrl_ren1}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_ren1 = _GEN_54[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_55 = {{uops_0_fp_ctrl_ren2}, {uops_14_fp_ctrl_ren2}, {uops_13_fp_ctrl_ren2}, {uops_12_fp_ctrl_ren2}, {uops_11_fp_ctrl_ren2}, {uops_10_fp_ctrl_ren2}, {uops_9_fp_ctrl_ren2}, {uops_8_fp_ctrl_ren2}, {uops_7_fp_ctrl_ren2}, {uops_6_fp_ctrl_ren2}, {uops_5_fp_ctrl_ren2}, {uops_4_fp_ctrl_ren2}, {uops_3_fp_ctrl_ren2}, {uops_2_fp_ctrl_ren2}, {uops_1_fp_ctrl_ren2}, {uops_0_fp_ctrl_ren2}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_ren2 = _GEN_55[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_56 = {{uops_0_fp_ctrl_ren3}, {uops_14_fp_ctrl_ren3}, {uops_13_fp_ctrl_ren3}, {uops_12_fp_ctrl_ren3}, {uops_11_fp_ctrl_ren3}, {uops_10_fp_ctrl_ren3}, {uops_9_fp_ctrl_ren3}, {uops_8_fp_ctrl_ren3}, {uops_7_fp_ctrl_ren3}, {uops_6_fp_ctrl_ren3}, {uops_5_fp_ctrl_ren3}, {uops_4_fp_ctrl_ren3}, {uops_3_fp_ctrl_ren3}, {uops_2_fp_ctrl_ren3}, {uops_1_fp_ctrl_ren3}, {uops_0_fp_ctrl_ren3}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_ren3 = _GEN_56[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_57 = {{uops_0_fp_ctrl_swap12}, {uops_14_fp_ctrl_swap12}, {uops_13_fp_ctrl_swap12}, {uops_12_fp_ctrl_swap12}, {uops_11_fp_ctrl_swap12}, {uops_10_fp_ctrl_swap12}, {uops_9_fp_ctrl_swap12}, {uops_8_fp_ctrl_swap12}, {uops_7_fp_ctrl_swap12}, {uops_6_fp_ctrl_swap12}, {uops_5_fp_ctrl_swap12}, {uops_4_fp_ctrl_swap12}, {uops_3_fp_ctrl_swap12}, {uops_2_fp_ctrl_swap12}, {uops_1_fp_ctrl_swap12}, {uops_0_fp_ctrl_swap12}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_swap12 = _GEN_57[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_58 = {{uops_0_fp_ctrl_swap23}, {uops_14_fp_ctrl_swap23}, {uops_13_fp_ctrl_swap23}, {uops_12_fp_ctrl_swap23}, {uops_11_fp_ctrl_swap23}, {uops_10_fp_ctrl_swap23}, {uops_9_fp_ctrl_swap23}, {uops_8_fp_ctrl_swap23}, {uops_7_fp_ctrl_swap23}, {uops_6_fp_ctrl_swap23}, {uops_5_fp_ctrl_swap23}, {uops_4_fp_ctrl_swap23}, {uops_3_fp_ctrl_swap23}, {uops_2_fp_ctrl_swap23}, {uops_1_fp_ctrl_swap23}, {uops_0_fp_ctrl_swap23}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_swap23 = _GEN_58[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_59 = {{uops_0_fp_ctrl_typeTagIn}, {uops_14_fp_ctrl_typeTagIn}, {uops_13_fp_ctrl_typeTagIn}, {uops_12_fp_ctrl_typeTagIn}, {uops_11_fp_ctrl_typeTagIn}, {uops_10_fp_ctrl_typeTagIn}, {uops_9_fp_ctrl_typeTagIn}, {uops_8_fp_ctrl_typeTagIn}, {uops_7_fp_ctrl_typeTagIn}, {uops_6_fp_ctrl_typeTagIn}, {uops_5_fp_ctrl_typeTagIn}, {uops_4_fp_ctrl_typeTagIn}, {uops_3_fp_ctrl_typeTagIn}, {uops_2_fp_ctrl_typeTagIn}, {uops_1_fp_ctrl_typeTagIn}, {uops_0_fp_ctrl_typeTagIn}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_typeTagIn = _GEN_59[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_60 = {{uops_0_fp_ctrl_typeTagOut}, {uops_14_fp_ctrl_typeTagOut}, {uops_13_fp_ctrl_typeTagOut}, {uops_12_fp_ctrl_typeTagOut}, {uops_11_fp_ctrl_typeTagOut}, {uops_10_fp_ctrl_typeTagOut}, {uops_9_fp_ctrl_typeTagOut}, {uops_8_fp_ctrl_typeTagOut}, {uops_7_fp_ctrl_typeTagOut}, {uops_6_fp_ctrl_typeTagOut}, {uops_5_fp_ctrl_typeTagOut}, {uops_4_fp_ctrl_typeTagOut}, {uops_3_fp_ctrl_typeTagOut}, {uops_2_fp_ctrl_typeTagOut}, {uops_1_fp_ctrl_typeTagOut}, {uops_0_fp_ctrl_typeTagOut}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_typeTagOut = _GEN_60[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_61 = {{uops_0_fp_ctrl_fromint}, {uops_14_fp_ctrl_fromint}, {uops_13_fp_ctrl_fromint}, {uops_12_fp_ctrl_fromint}, {uops_11_fp_ctrl_fromint}, {uops_10_fp_ctrl_fromint}, {uops_9_fp_ctrl_fromint}, {uops_8_fp_ctrl_fromint}, {uops_7_fp_ctrl_fromint}, {uops_6_fp_ctrl_fromint}, {uops_5_fp_ctrl_fromint}, {uops_4_fp_ctrl_fromint}, {uops_3_fp_ctrl_fromint}, {uops_2_fp_ctrl_fromint}, {uops_1_fp_ctrl_fromint}, {uops_0_fp_ctrl_fromint}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_fromint = _GEN_61[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_62 = {{uops_0_fp_ctrl_toint}, {uops_14_fp_ctrl_toint}, {uops_13_fp_ctrl_toint}, {uops_12_fp_ctrl_toint}, {uops_11_fp_ctrl_toint}, {uops_10_fp_ctrl_toint}, {uops_9_fp_ctrl_toint}, {uops_8_fp_ctrl_toint}, {uops_7_fp_ctrl_toint}, {uops_6_fp_ctrl_toint}, {uops_5_fp_ctrl_toint}, {uops_4_fp_ctrl_toint}, {uops_3_fp_ctrl_toint}, {uops_2_fp_ctrl_toint}, {uops_1_fp_ctrl_toint}, {uops_0_fp_ctrl_toint}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_toint = _GEN_62[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_63 = {{uops_0_fp_ctrl_fastpipe}, {uops_14_fp_ctrl_fastpipe}, {uops_13_fp_ctrl_fastpipe}, {uops_12_fp_ctrl_fastpipe}, {uops_11_fp_ctrl_fastpipe}, {uops_10_fp_ctrl_fastpipe}, {uops_9_fp_ctrl_fastpipe}, {uops_8_fp_ctrl_fastpipe}, {uops_7_fp_ctrl_fastpipe}, {uops_6_fp_ctrl_fastpipe}, {uops_5_fp_ctrl_fastpipe}, {uops_4_fp_ctrl_fastpipe}, {uops_3_fp_ctrl_fastpipe}, {uops_2_fp_ctrl_fastpipe}, {uops_1_fp_ctrl_fastpipe}, {uops_0_fp_ctrl_fastpipe}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_fastpipe = _GEN_63[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_64 = {{uops_0_fp_ctrl_fma}, {uops_14_fp_ctrl_fma}, {uops_13_fp_ctrl_fma}, {uops_12_fp_ctrl_fma}, {uops_11_fp_ctrl_fma}, {uops_10_fp_ctrl_fma}, {uops_9_fp_ctrl_fma}, {uops_8_fp_ctrl_fma}, {uops_7_fp_ctrl_fma}, {uops_6_fp_ctrl_fma}, {uops_5_fp_ctrl_fma}, {uops_4_fp_ctrl_fma}, {uops_3_fp_ctrl_fma}, {uops_2_fp_ctrl_fma}, {uops_1_fp_ctrl_fma}, {uops_0_fp_ctrl_fma}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_fma = _GEN_64[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_65 = {{uops_0_fp_ctrl_div}, {uops_14_fp_ctrl_div}, {uops_13_fp_ctrl_div}, {uops_12_fp_ctrl_div}, {uops_11_fp_ctrl_div}, {uops_10_fp_ctrl_div}, {uops_9_fp_ctrl_div}, {uops_8_fp_ctrl_div}, {uops_7_fp_ctrl_div}, {uops_6_fp_ctrl_div}, {uops_5_fp_ctrl_div}, {uops_4_fp_ctrl_div}, {uops_3_fp_ctrl_div}, {uops_2_fp_ctrl_div}, {uops_1_fp_ctrl_div}, {uops_0_fp_ctrl_div}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_div = _GEN_65[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_66 = {{uops_0_fp_ctrl_sqrt}, {uops_14_fp_ctrl_sqrt}, {uops_13_fp_ctrl_sqrt}, {uops_12_fp_ctrl_sqrt}, {uops_11_fp_ctrl_sqrt}, {uops_10_fp_ctrl_sqrt}, {uops_9_fp_ctrl_sqrt}, {uops_8_fp_ctrl_sqrt}, {uops_7_fp_ctrl_sqrt}, {uops_6_fp_ctrl_sqrt}, {uops_5_fp_ctrl_sqrt}, {uops_4_fp_ctrl_sqrt}, {uops_3_fp_ctrl_sqrt}, {uops_2_fp_ctrl_sqrt}, {uops_1_fp_ctrl_sqrt}, {uops_0_fp_ctrl_sqrt}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_sqrt = _GEN_66[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_67 = {{uops_0_fp_ctrl_wflags}, {uops_14_fp_ctrl_wflags}, {uops_13_fp_ctrl_wflags}, {uops_12_fp_ctrl_wflags}, {uops_11_fp_ctrl_wflags}, {uops_10_fp_ctrl_wflags}, {uops_9_fp_ctrl_wflags}, {uops_8_fp_ctrl_wflags}, {uops_7_fp_ctrl_wflags}, {uops_6_fp_ctrl_wflags}, {uops_5_fp_ctrl_wflags}, {uops_4_fp_ctrl_wflags}, {uops_3_fp_ctrl_wflags}, {uops_2_fp_ctrl_wflags}, {uops_1_fp_ctrl_wflags}, {uops_0_fp_ctrl_wflags}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_wflags = _GEN_67[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_68 = {{uops_0_fp_ctrl_vec}, {uops_14_fp_ctrl_vec}, {uops_13_fp_ctrl_vec}, {uops_12_fp_ctrl_vec}, {uops_11_fp_ctrl_vec}, {uops_10_fp_ctrl_vec}, {uops_9_fp_ctrl_vec}, {uops_8_fp_ctrl_vec}, {uops_7_fp_ctrl_vec}, {uops_6_fp_ctrl_vec}, {uops_5_fp_ctrl_vec}, {uops_4_fp_ctrl_vec}, {uops_3_fp_ctrl_vec}, {uops_2_fp_ctrl_vec}, {uops_1_fp_ctrl_vec}, {uops_0_fp_ctrl_vec}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_vec = _GEN_68[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][4:0] _GEN_69 = {{uops_0_rob_idx}, {uops_14_rob_idx}, {uops_13_rob_idx}, {uops_12_rob_idx}, {uops_11_rob_idx}, {uops_10_rob_idx}, {uops_9_rob_idx}, {uops_8_rob_idx}, {uops_7_rob_idx}, {uops_6_rob_idx}, {uops_5_rob_idx}, {uops_4_rob_idx}, {uops_3_rob_idx}, {uops_2_rob_idx}, {uops_1_rob_idx}, {uops_0_rob_idx}}; // @[util.scala:505:22, :547:21]
assign out_uop_rob_idx = _GEN_69[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][3:0] _GEN_70 = {{uops_0_ldq_idx}, {uops_14_ldq_idx}, {uops_13_ldq_idx}, {uops_12_ldq_idx}, {uops_11_ldq_idx}, {uops_10_ldq_idx}, {uops_9_ldq_idx}, {uops_8_ldq_idx}, {uops_7_ldq_idx}, {uops_6_ldq_idx}, {uops_5_ldq_idx}, {uops_4_ldq_idx}, {uops_3_ldq_idx}, {uops_2_ldq_idx}, {uops_1_ldq_idx}, {uops_0_ldq_idx}}; // @[util.scala:505:22, :547:21]
assign out_uop_ldq_idx = _GEN_70[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][3:0] _GEN_71 = {{uops_0_stq_idx}, {uops_14_stq_idx}, {uops_13_stq_idx}, {uops_12_stq_idx}, {uops_11_stq_idx}, {uops_10_stq_idx}, {uops_9_stq_idx}, {uops_8_stq_idx}, {uops_7_stq_idx}, {uops_6_stq_idx}, {uops_5_stq_idx}, {uops_4_stq_idx}, {uops_3_stq_idx}, {uops_2_stq_idx}, {uops_1_stq_idx}, {uops_0_stq_idx}}; // @[util.scala:505:22, :547:21]
assign out_uop_stq_idx = _GEN_71[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_72 = {{uops_0_rxq_idx}, {uops_14_rxq_idx}, {uops_13_rxq_idx}, {uops_12_rxq_idx}, {uops_11_rxq_idx}, {uops_10_rxq_idx}, {uops_9_rxq_idx}, {uops_8_rxq_idx}, {uops_7_rxq_idx}, {uops_6_rxq_idx}, {uops_5_rxq_idx}, {uops_4_rxq_idx}, {uops_3_rxq_idx}, {uops_2_rxq_idx}, {uops_1_rxq_idx}, {uops_0_rxq_idx}}; // @[util.scala:505:22, :547:21]
assign out_uop_rxq_idx = _GEN_72[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_73 = {{uops_0_pdst}, {uops_14_pdst}, {uops_13_pdst}, {uops_12_pdst}, {uops_11_pdst}, {uops_10_pdst}, {uops_9_pdst}, {uops_8_pdst}, {uops_7_pdst}, {uops_6_pdst}, {uops_5_pdst}, {uops_4_pdst}, {uops_3_pdst}, {uops_2_pdst}, {uops_1_pdst}, {uops_0_pdst}}; // @[util.scala:505:22, :547:21]
assign out_uop_pdst = _GEN_73[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_74 = {{uops_0_prs1}, {uops_14_prs1}, {uops_13_prs1}, {uops_12_prs1}, {uops_11_prs1}, {uops_10_prs1}, {uops_9_prs1}, {uops_8_prs1}, {uops_7_prs1}, {uops_6_prs1}, {uops_5_prs1}, {uops_4_prs1}, {uops_3_prs1}, {uops_2_prs1}, {uops_1_prs1}, {uops_0_prs1}}; // @[util.scala:505:22, :547:21]
assign out_uop_prs1 = _GEN_74[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_75 = {{uops_0_prs2}, {uops_14_prs2}, {uops_13_prs2}, {uops_12_prs2}, {uops_11_prs2}, {uops_10_prs2}, {uops_9_prs2}, {uops_8_prs2}, {uops_7_prs2}, {uops_6_prs2}, {uops_5_prs2}, {uops_4_prs2}, {uops_3_prs2}, {uops_2_prs2}, {uops_1_prs2}, {uops_0_prs2}}; // @[util.scala:505:22, :547:21]
assign out_uop_prs2 = _GEN_75[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_76 = {{uops_0_prs3}, {uops_14_prs3}, {uops_13_prs3}, {uops_12_prs3}, {uops_11_prs3}, {uops_10_prs3}, {uops_9_prs3}, {uops_8_prs3}, {uops_7_prs3}, {uops_6_prs3}, {uops_5_prs3}, {uops_4_prs3}, {uops_3_prs3}, {uops_2_prs3}, {uops_1_prs3}, {uops_0_prs3}}; // @[util.scala:505:22, :547:21]
assign out_uop_prs3 = _GEN_76[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][3:0] _GEN_77 = {{uops_0_ppred}, {uops_14_ppred}, {uops_13_ppred}, {uops_12_ppred}, {uops_11_ppred}, {uops_10_ppred}, {uops_9_ppred}, {uops_8_ppred}, {uops_7_ppred}, {uops_6_ppred}, {uops_5_ppred}, {uops_4_ppred}, {uops_3_ppred}, {uops_2_ppred}, {uops_1_ppred}, {uops_0_ppred}}; // @[util.scala:505:22, :547:21]
assign out_uop_ppred = _GEN_77[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_78 = {{uops_0_prs1_busy}, {uops_14_prs1_busy}, {uops_13_prs1_busy}, {uops_12_prs1_busy}, {uops_11_prs1_busy}, {uops_10_prs1_busy}, {uops_9_prs1_busy}, {uops_8_prs1_busy}, {uops_7_prs1_busy}, {uops_6_prs1_busy}, {uops_5_prs1_busy}, {uops_4_prs1_busy}, {uops_3_prs1_busy}, {uops_2_prs1_busy}, {uops_1_prs1_busy}, {uops_0_prs1_busy}}; // @[util.scala:505:22, :547:21]
assign out_uop_prs1_busy = _GEN_78[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_79 = {{uops_0_prs2_busy}, {uops_14_prs2_busy}, {uops_13_prs2_busy}, {uops_12_prs2_busy}, {uops_11_prs2_busy}, {uops_10_prs2_busy}, {uops_9_prs2_busy}, {uops_8_prs2_busy}, {uops_7_prs2_busy}, {uops_6_prs2_busy}, {uops_5_prs2_busy}, {uops_4_prs2_busy}, {uops_3_prs2_busy}, {uops_2_prs2_busy}, {uops_1_prs2_busy}, {uops_0_prs2_busy}}; // @[util.scala:505:22, :547:21]
assign out_uop_prs2_busy = _GEN_79[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_80 = {{uops_0_prs3_busy}, {uops_14_prs3_busy}, {uops_13_prs3_busy}, {uops_12_prs3_busy}, {uops_11_prs3_busy}, {uops_10_prs3_busy}, {uops_9_prs3_busy}, {uops_8_prs3_busy}, {uops_7_prs3_busy}, {uops_6_prs3_busy}, {uops_5_prs3_busy}, {uops_4_prs3_busy}, {uops_3_prs3_busy}, {uops_2_prs3_busy}, {uops_1_prs3_busy}, {uops_0_prs3_busy}}; // @[util.scala:505:22, :547:21]
assign out_uop_prs3_busy = _GEN_80[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_81 = {{uops_0_ppred_busy}, {uops_14_ppred_busy}, {uops_13_ppred_busy}, {uops_12_ppred_busy}, {uops_11_ppred_busy}, {uops_10_ppred_busy}, {uops_9_ppred_busy}, {uops_8_ppred_busy}, {uops_7_ppred_busy}, {uops_6_ppred_busy}, {uops_5_ppred_busy}, {uops_4_ppred_busy}, {uops_3_ppred_busy}, {uops_2_ppred_busy}, {uops_1_ppred_busy}, {uops_0_ppred_busy}}; // @[util.scala:505:22, :547:21]
assign out_uop_ppred_busy = _GEN_81[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_82 = {{uops_0_stale_pdst}, {uops_14_stale_pdst}, {uops_13_stale_pdst}, {uops_12_stale_pdst}, {uops_11_stale_pdst}, {uops_10_stale_pdst}, {uops_9_stale_pdst}, {uops_8_stale_pdst}, {uops_7_stale_pdst}, {uops_6_stale_pdst}, {uops_5_stale_pdst}, {uops_4_stale_pdst}, {uops_3_stale_pdst}, {uops_2_stale_pdst}, {uops_1_stale_pdst}, {uops_0_stale_pdst}}; // @[util.scala:505:22, :547:21]
assign out_uop_stale_pdst = _GEN_82[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_83 = {{uops_0_exception}, {uops_14_exception}, {uops_13_exception}, {uops_12_exception}, {uops_11_exception}, {uops_10_exception}, {uops_9_exception}, {uops_8_exception}, {uops_7_exception}, {uops_6_exception}, {uops_5_exception}, {uops_4_exception}, {uops_3_exception}, {uops_2_exception}, {uops_1_exception}, {uops_0_exception}}; // @[util.scala:505:22, :547:21]
assign out_uop_exception = _GEN_83[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][63:0] _GEN_84 = {{uops_0_exc_cause}, {uops_14_exc_cause}, {uops_13_exc_cause}, {uops_12_exc_cause}, {uops_11_exc_cause}, {uops_10_exc_cause}, {uops_9_exc_cause}, {uops_8_exc_cause}, {uops_7_exc_cause}, {uops_6_exc_cause}, {uops_5_exc_cause}, {uops_4_exc_cause}, {uops_3_exc_cause}, {uops_2_exc_cause}, {uops_1_exc_cause}, {uops_0_exc_cause}}; // @[util.scala:505:22, :547:21]
assign out_uop_exc_cause = _GEN_84[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][4:0] _GEN_85 = {{uops_0_mem_cmd}, {uops_14_mem_cmd}, {uops_13_mem_cmd}, {uops_12_mem_cmd}, {uops_11_mem_cmd}, {uops_10_mem_cmd}, {uops_9_mem_cmd}, {uops_8_mem_cmd}, {uops_7_mem_cmd}, {uops_6_mem_cmd}, {uops_5_mem_cmd}, {uops_4_mem_cmd}, {uops_3_mem_cmd}, {uops_2_mem_cmd}, {uops_1_mem_cmd}, {uops_0_mem_cmd}}; // @[util.scala:505:22, :547:21]
assign out_uop_mem_cmd = _GEN_85[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_86 = {{uops_0_mem_size}, {uops_14_mem_size}, {uops_13_mem_size}, {uops_12_mem_size}, {uops_11_mem_size}, {uops_10_mem_size}, {uops_9_mem_size}, {uops_8_mem_size}, {uops_7_mem_size}, {uops_6_mem_size}, {uops_5_mem_size}, {uops_4_mem_size}, {uops_3_mem_size}, {uops_2_mem_size}, {uops_1_mem_size}, {uops_0_mem_size}}; // @[util.scala:505:22, :547:21]
assign out_uop_mem_size = _GEN_86[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_87 = {{uops_0_mem_signed}, {uops_14_mem_signed}, {uops_13_mem_signed}, {uops_12_mem_signed}, {uops_11_mem_signed}, {uops_10_mem_signed}, {uops_9_mem_signed}, {uops_8_mem_signed}, {uops_7_mem_signed}, {uops_6_mem_signed}, {uops_5_mem_signed}, {uops_4_mem_signed}, {uops_3_mem_signed}, {uops_2_mem_signed}, {uops_1_mem_signed}, {uops_0_mem_signed}}; // @[util.scala:505:22, :547:21]
assign out_uop_mem_signed = _GEN_87[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_88 = {{uops_0_uses_ldq}, {uops_14_uses_ldq}, {uops_13_uses_ldq}, {uops_12_uses_ldq}, {uops_11_uses_ldq}, {uops_10_uses_ldq}, {uops_9_uses_ldq}, {uops_8_uses_ldq}, {uops_7_uses_ldq}, {uops_6_uses_ldq}, {uops_5_uses_ldq}, {uops_4_uses_ldq}, {uops_3_uses_ldq}, {uops_2_uses_ldq}, {uops_1_uses_ldq}, {uops_0_uses_ldq}}; // @[util.scala:505:22, :547:21]
assign out_uop_uses_ldq = _GEN_88[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_89 = {{uops_0_uses_stq}, {uops_14_uses_stq}, {uops_13_uses_stq}, {uops_12_uses_stq}, {uops_11_uses_stq}, {uops_10_uses_stq}, {uops_9_uses_stq}, {uops_8_uses_stq}, {uops_7_uses_stq}, {uops_6_uses_stq}, {uops_5_uses_stq}, {uops_4_uses_stq}, {uops_3_uses_stq}, {uops_2_uses_stq}, {uops_1_uses_stq}, {uops_0_uses_stq}}; // @[util.scala:505:22, :547:21]
assign out_uop_uses_stq = _GEN_89[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_90 = {{uops_0_is_unique}, {uops_14_is_unique}, {uops_13_is_unique}, {uops_12_is_unique}, {uops_11_is_unique}, {uops_10_is_unique}, {uops_9_is_unique}, {uops_8_is_unique}, {uops_7_is_unique}, {uops_6_is_unique}, {uops_5_is_unique}, {uops_4_is_unique}, {uops_3_is_unique}, {uops_2_is_unique}, {uops_1_is_unique}, {uops_0_is_unique}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_unique = _GEN_90[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_91 = {{uops_0_flush_on_commit}, {uops_14_flush_on_commit}, {uops_13_flush_on_commit}, {uops_12_flush_on_commit}, {uops_11_flush_on_commit}, {uops_10_flush_on_commit}, {uops_9_flush_on_commit}, {uops_8_flush_on_commit}, {uops_7_flush_on_commit}, {uops_6_flush_on_commit}, {uops_5_flush_on_commit}, {uops_4_flush_on_commit}, {uops_3_flush_on_commit}, {uops_2_flush_on_commit}, {uops_1_flush_on_commit}, {uops_0_flush_on_commit}}; // @[util.scala:505:22, :547:21]
assign out_uop_flush_on_commit = _GEN_91[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][2:0] _GEN_92 = {{uops_0_csr_cmd}, {uops_14_csr_cmd}, {uops_13_csr_cmd}, {uops_12_csr_cmd}, {uops_11_csr_cmd}, {uops_10_csr_cmd}, {uops_9_csr_cmd}, {uops_8_csr_cmd}, {uops_7_csr_cmd}, {uops_6_csr_cmd}, {uops_5_csr_cmd}, {uops_4_csr_cmd}, {uops_3_csr_cmd}, {uops_2_csr_cmd}, {uops_1_csr_cmd}, {uops_0_csr_cmd}}; // @[util.scala:505:22, :547:21]
assign out_uop_csr_cmd = _GEN_92[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_93 = {{uops_0_ldst_is_rs1}, {uops_14_ldst_is_rs1}, {uops_13_ldst_is_rs1}, {uops_12_ldst_is_rs1}, {uops_11_ldst_is_rs1}, {uops_10_ldst_is_rs1}, {uops_9_ldst_is_rs1}, {uops_8_ldst_is_rs1}, {uops_7_ldst_is_rs1}, {uops_6_ldst_is_rs1}, {uops_5_ldst_is_rs1}, {uops_4_ldst_is_rs1}, {uops_3_ldst_is_rs1}, {uops_2_ldst_is_rs1}, {uops_1_ldst_is_rs1}, {uops_0_ldst_is_rs1}}; // @[util.scala:505:22, :547:21]
assign out_uop_ldst_is_rs1 = _GEN_93[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_94 = {{uops_0_ldst}, {uops_14_ldst}, {uops_13_ldst}, {uops_12_ldst}, {uops_11_ldst}, {uops_10_ldst}, {uops_9_ldst}, {uops_8_ldst}, {uops_7_ldst}, {uops_6_ldst}, {uops_5_ldst}, {uops_4_ldst}, {uops_3_ldst}, {uops_2_ldst}, {uops_1_ldst}, {uops_0_ldst}}; // @[util.scala:505:22, :547:21]
assign out_uop_ldst = _GEN_94[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_95 = {{uops_0_lrs1}, {uops_14_lrs1}, {uops_13_lrs1}, {uops_12_lrs1}, {uops_11_lrs1}, {uops_10_lrs1}, {uops_9_lrs1}, {uops_8_lrs1}, {uops_7_lrs1}, {uops_6_lrs1}, {uops_5_lrs1}, {uops_4_lrs1}, {uops_3_lrs1}, {uops_2_lrs1}, {uops_1_lrs1}, {uops_0_lrs1}}; // @[util.scala:505:22, :547:21]
assign out_uop_lrs1 = _GEN_95[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_96 = {{uops_0_lrs2}, {uops_14_lrs2}, {uops_13_lrs2}, {uops_12_lrs2}, {uops_11_lrs2}, {uops_10_lrs2}, {uops_9_lrs2}, {uops_8_lrs2}, {uops_7_lrs2}, {uops_6_lrs2}, {uops_5_lrs2}, {uops_4_lrs2}, {uops_3_lrs2}, {uops_2_lrs2}, {uops_1_lrs2}, {uops_0_lrs2}}; // @[util.scala:505:22, :547:21]
assign out_uop_lrs2 = _GEN_96[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_97 = {{uops_0_lrs3}, {uops_14_lrs3}, {uops_13_lrs3}, {uops_12_lrs3}, {uops_11_lrs3}, {uops_10_lrs3}, {uops_9_lrs3}, {uops_8_lrs3}, {uops_7_lrs3}, {uops_6_lrs3}, {uops_5_lrs3}, {uops_4_lrs3}, {uops_3_lrs3}, {uops_2_lrs3}, {uops_1_lrs3}, {uops_0_lrs3}}; // @[util.scala:505:22, :547:21]
assign out_uop_lrs3 = _GEN_97[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_98 = {{uops_0_dst_rtype}, {uops_14_dst_rtype}, {uops_13_dst_rtype}, {uops_12_dst_rtype}, {uops_11_dst_rtype}, {uops_10_dst_rtype}, {uops_9_dst_rtype}, {uops_8_dst_rtype}, {uops_7_dst_rtype}, {uops_6_dst_rtype}, {uops_5_dst_rtype}, {uops_4_dst_rtype}, {uops_3_dst_rtype}, {uops_2_dst_rtype}, {uops_1_dst_rtype}, {uops_0_dst_rtype}}; // @[util.scala:505:22, :547:21]
assign out_uop_dst_rtype = _GEN_98[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_99 = {{uops_0_lrs1_rtype}, {uops_14_lrs1_rtype}, {uops_13_lrs1_rtype}, {uops_12_lrs1_rtype}, {uops_11_lrs1_rtype}, {uops_10_lrs1_rtype}, {uops_9_lrs1_rtype}, {uops_8_lrs1_rtype}, {uops_7_lrs1_rtype}, {uops_6_lrs1_rtype}, {uops_5_lrs1_rtype}, {uops_4_lrs1_rtype}, {uops_3_lrs1_rtype}, {uops_2_lrs1_rtype}, {uops_1_lrs1_rtype}, {uops_0_lrs1_rtype}}; // @[util.scala:505:22, :547:21]
assign out_uop_lrs1_rtype = _GEN_99[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_100 = {{uops_0_lrs2_rtype}, {uops_14_lrs2_rtype}, {uops_13_lrs2_rtype}, {uops_12_lrs2_rtype}, {uops_11_lrs2_rtype}, {uops_10_lrs2_rtype}, {uops_9_lrs2_rtype}, {uops_8_lrs2_rtype}, {uops_7_lrs2_rtype}, {uops_6_lrs2_rtype}, {uops_5_lrs2_rtype}, {uops_4_lrs2_rtype}, {uops_3_lrs2_rtype}, {uops_2_lrs2_rtype}, {uops_1_lrs2_rtype}, {uops_0_lrs2_rtype}}; // @[util.scala:505:22, :547:21]
assign out_uop_lrs2_rtype = _GEN_100[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_101 = {{uops_0_frs3_en}, {uops_14_frs3_en}, {uops_13_frs3_en}, {uops_12_frs3_en}, {uops_11_frs3_en}, {uops_10_frs3_en}, {uops_9_frs3_en}, {uops_8_frs3_en}, {uops_7_frs3_en}, {uops_6_frs3_en}, {uops_5_frs3_en}, {uops_4_frs3_en}, {uops_3_frs3_en}, {uops_2_frs3_en}, {uops_1_frs3_en}, {uops_0_frs3_en}}; // @[util.scala:505:22, :547:21]
assign out_uop_frs3_en = _GEN_101[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_102 = {{uops_0_fcn_dw}, {uops_14_fcn_dw}, {uops_13_fcn_dw}, {uops_12_fcn_dw}, {uops_11_fcn_dw}, {uops_10_fcn_dw}, {uops_9_fcn_dw}, {uops_8_fcn_dw}, {uops_7_fcn_dw}, {uops_6_fcn_dw}, {uops_5_fcn_dw}, {uops_4_fcn_dw}, {uops_3_fcn_dw}, {uops_2_fcn_dw}, {uops_1_fcn_dw}, {uops_0_fcn_dw}}; // @[util.scala:505:22, :547:21]
assign out_uop_fcn_dw = _GEN_102[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][4:0] _GEN_103 = {{uops_0_fcn_op}, {uops_14_fcn_op}, {uops_13_fcn_op}, {uops_12_fcn_op}, {uops_11_fcn_op}, {uops_10_fcn_op}, {uops_9_fcn_op}, {uops_8_fcn_op}, {uops_7_fcn_op}, {uops_6_fcn_op}, {uops_5_fcn_op}, {uops_4_fcn_op}, {uops_3_fcn_op}, {uops_2_fcn_op}, {uops_1_fcn_op}, {uops_0_fcn_op}}; // @[util.scala:505:22, :547:21]
assign out_uop_fcn_op = _GEN_103[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_104 = {{uops_0_fp_val}, {uops_14_fp_val}, {uops_13_fp_val}, {uops_12_fp_val}, {uops_11_fp_val}, {uops_10_fp_val}, {uops_9_fp_val}, {uops_8_fp_val}, {uops_7_fp_val}, {uops_6_fp_val}, {uops_5_fp_val}, {uops_4_fp_val}, {uops_3_fp_val}, {uops_2_fp_val}, {uops_1_fp_val}, {uops_0_fp_val}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_val = _GEN_104[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][2:0] _GEN_105 = {{uops_0_fp_rm}, {uops_14_fp_rm}, {uops_13_fp_rm}, {uops_12_fp_rm}, {uops_11_fp_rm}, {uops_10_fp_rm}, {uops_9_fp_rm}, {uops_8_fp_rm}, {uops_7_fp_rm}, {uops_6_fp_rm}, {uops_5_fp_rm}, {uops_4_fp_rm}, {uops_3_fp_rm}, {uops_2_fp_rm}, {uops_1_fp_rm}, {uops_0_fp_rm}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_rm = _GEN_105[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_106 = {{uops_0_fp_typ}, {uops_14_fp_typ}, {uops_13_fp_typ}, {uops_12_fp_typ}, {uops_11_fp_typ}, {uops_10_fp_typ}, {uops_9_fp_typ}, {uops_8_fp_typ}, {uops_7_fp_typ}, {uops_6_fp_typ}, {uops_5_fp_typ}, {uops_4_fp_typ}, {uops_3_fp_typ}, {uops_2_fp_typ}, {uops_1_fp_typ}, {uops_0_fp_typ}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_typ = _GEN_106[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_107 = {{uops_0_xcpt_pf_if}, {uops_14_xcpt_pf_if}, {uops_13_xcpt_pf_if}, {uops_12_xcpt_pf_if}, {uops_11_xcpt_pf_if}, {uops_10_xcpt_pf_if}, {uops_9_xcpt_pf_if}, {uops_8_xcpt_pf_if}, {uops_7_xcpt_pf_if}, {uops_6_xcpt_pf_if}, {uops_5_xcpt_pf_if}, {uops_4_xcpt_pf_if}, {uops_3_xcpt_pf_if}, {uops_2_xcpt_pf_if}, {uops_1_xcpt_pf_if}, {uops_0_xcpt_pf_if}}; // @[util.scala:505:22, :547:21]
assign out_uop_xcpt_pf_if = _GEN_107[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_108 = {{uops_0_xcpt_ae_if}, {uops_14_xcpt_ae_if}, {uops_13_xcpt_ae_if}, {uops_12_xcpt_ae_if}, {uops_11_xcpt_ae_if}, {uops_10_xcpt_ae_if}, {uops_9_xcpt_ae_if}, {uops_8_xcpt_ae_if}, {uops_7_xcpt_ae_if}, {uops_6_xcpt_ae_if}, {uops_5_xcpt_ae_if}, {uops_4_xcpt_ae_if}, {uops_3_xcpt_ae_if}, {uops_2_xcpt_ae_if}, {uops_1_xcpt_ae_if}, {uops_0_xcpt_ae_if}}; // @[util.scala:505:22, :547:21]
assign out_uop_xcpt_ae_if = _GEN_108[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_109 = {{uops_0_xcpt_ma_if}, {uops_14_xcpt_ma_if}, {uops_13_xcpt_ma_if}, {uops_12_xcpt_ma_if}, {uops_11_xcpt_ma_if}, {uops_10_xcpt_ma_if}, {uops_9_xcpt_ma_if}, {uops_8_xcpt_ma_if}, {uops_7_xcpt_ma_if}, {uops_6_xcpt_ma_if}, {uops_5_xcpt_ma_if}, {uops_4_xcpt_ma_if}, {uops_3_xcpt_ma_if}, {uops_2_xcpt_ma_if}, {uops_1_xcpt_ma_if}, {uops_0_xcpt_ma_if}}; // @[util.scala:505:22, :547:21]
assign out_uop_xcpt_ma_if = _GEN_109[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_110 = {{uops_0_bp_debug_if}, {uops_14_bp_debug_if}, {uops_13_bp_debug_if}, {uops_12_bp_debug_if}, {uops_11_bp_debug_if}, {uops_10_bp_debug_if}, {uops_9_bp_debug_if}, {uops_8_bp_debug_if}, {uops_7_bp_debug_if}, {uops_6_bp_debug_if}, {uops_5_bp_debug_if}, {uops_4_bp_debug_if}, {uops_3_bp_debug_if}, {uops_2_bp_debug_if}, {uops_1_bp_debug_if}, {uops_0_bp_debug_if}}; // @[util.scala:505:22, :547:21]
assign out_uop_bp_debug_if = _GEN_110[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_111 = {{uops_0_bp_xcpt_if}, {uops_14_bp_xcpt_if}, {uops_13_bp_xcpt_if}, {uops_12_bp_xcpt_if}, {uops_11_bp_xcpt_if}, {uops_10_bp_xcpt_if}, {uops_9_bp_xcpt_if}, {uops_8_bp_xcpt_if}, {uops_7_bp_xcpt_if}, {uops_6_bp_xcpt_if}, {uops_5_bp_xcpt_if}, {uops_4_bp_xcpt_if}, {uops_3_bp_xcpt_if}, {uops_2_bp_xcpt_if}, {uops_1_bp_xcpt_if}, {uops_0_bp_xcpt_if}}; // @[util.scala:505:22, :547:21]
assign out_uop_bp_xcpt_if = _GEN_111[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][2:0] _GEN_112 = {{uops_0_debug_fsrc}, {uops_14_debug_fsrc}, {uops_13_debug_fsrc}, {uops_12_debug_fsrc}, {uops_11_debug_fsrc}, {uops_10_debug_fsrc}, {uops_9_debug_fsrc}, {uops_8_debug_fsrc}, {uops_7_debug_fsrc}, {uops_6_debug_fsrc}, {uops_5_debug_fsrc}, {uops_4_debug_fsrc}, {uops_3_debug_fsrc}, {uops_2_debug_fsrc}, {uops_1_debug_fsrc}, {uops_0_debug_fsrc}}; // @[util.scala:505:22, :547:21]
assign out_uop_debug_fsrc = _GEN_112[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][2:0] _GEN_113 = {{uops_0_debug_tsrc}, {uops_14_debug_tsrc}, {uops_13_debug_tsrc}, {uops_12_debug_tsrc}, {uops_11_debug_tsrc}, {uops_10_debug_tsrc}, {uops_9_debug_tsrc}, {uops_8_debug_tsrc}, {uops_7_debug_tsrc}, {uops_6_debug_tsrc}, {uops_5_debug_tsrc}, {uops_4_debug_tsrc}, {uops_3_debug_tsrc}, {uops_2_debug_tsrc}, {uops_1_debug_tsrc}, {uops_0_debug_tsrc}}; // @[util.scala:505:22, :547:21]
assign out_uop_debug_tsrc = _GEN_113[deq_ptr_value]; // @[Counter.scala:61:40]
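  // Note: the 16-way _GEN_* packed arrays above are the dequeue-side read muxes for a
  // 15-entry queue (uops_0 .. uops_14); entry 0 is repeated at index 15 as the lowering's
  // default for the single out-of-range value of deq_ptr_value, which is never reached
  // in normal operation.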
wire _io_deq_valid_T = ~io_empty_0; // @[util.scala:458:7, :515:71, :548:32]
assign _io_deq_valid_T_1 = _io_deq_valid_T & _GEN_0; // @[util.scala:515:44, :548:{32,42}]
assign io_deq_valid_0 = _io_deq_valid_T_1; // @[util.scala:458:7, :548:42]
wire [4:0] _ptr_diff_T = _GEN_1 - _GEN_2; // @[Counter.scala:77:24]
wire [3:0] ptr_diff = _ptr_diff_T[3:0]; // @[util.scala:551:34]
wire [3:0] _io_count_T = {4{maybe_full}}; // @[util.scala:509:29, :557:12]
wire _io_count_T_1 = deq_ptr_value > enq_ptr_value; // @[Counter.scala:61:40]
wire [4:0] _io_count_T_2 = {1'h0, ptr_diff} + 5'hF; // @[util.scala:551:34, :560:26]
wire [3:0] _io_count_T_3 = _io_count_T_2[3:0]; // @[util.scala:560:26]
wire [3:0] _io_count_T_4 = _io_count_T_1 ? _io_count_T_3 : ptr_diff; // @[util.scala:551:34, :559:{12,27}, :560:26]
assign _io_count_T_5 = ptr_match ? _io_count_T : _io_count_T_4; // @[util.scala:511:35, :556:22, :557:12, :559:12]
assign io_count_0 = _io_count_T_5; // @[util.scala:458:7, :556:22]
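  // Note on the occupancy count above: when the pointers match, {4{maybe_full}} yields
  // 4'hF (15, the full depth) if the queue is full and 0 otherwise. When deq_ptr_value
  // has wrapped past enq_ptr_value, adding 5'hF to the pointer difference and truncating
  // to 4 bits gives 15 - (deq_ptr_value - enq_ptr_value), the number of occupied entries.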
wire _GEN_114 = enq_ptr_value == 4'h0; // @[Counter.scala:61:40]
wire _GEN_115 = do_enq & _GEN_114; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_116 = enq_ptr_value == 4'h1; // @[Counter.scala:61:40]
wire _GEN_117 = do_enq & _GEN_116; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_118 = enq_ptr_value == 4'h2; // @[Counter.scala:61:40]
wire _GEN_119 = do_enq & _GEN_118; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_120 = enq_ptr_value == 4'h3; // @[Counter.scala:61:40]
wire _GEN_121 = do_enq & _GEN_120; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_122 = enq_ptr_value == 4'h4; // @[Counter.scala:61:40]
wire _GEN_123 = do_enq & _GEN_122; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_124 = enq_ptr_value == 4'h5; // @[Counter.scala:61:40]
wire _GEN_125 = do_enq & _GEN_124; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_126 = enq_ptr_value == 4'h6; // @[Counter.scala:61:40]
wire _GEN_127 = do_enq & _GEN_126; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_128 = enq_ptr_value == 4'h7; // @[Counter.scala:61:40]
wire _GEN_129 = do_enq & _GEN_128; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_130 = enq_ptr_value == 4'h8; // @[Counter.scala:61:40]
wire _GEN_131 = do_enq & _GEN_130; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_132 = enq_ptr_value == 4'h9; // @[Counter.scala:61:40]
wire _GEN_133 = do_enq & _GEN_132; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_134 = enq_ptr_value == 4'hA; // @[Counter.scala:61:40]
wire _GEN_135 = do_enq & _GEN_134; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_136 = enq_ptr_value == 4'hB; // @[Counter.scala:61:40]
wire _GEN_137 = do_enq & _GEN_136; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_138 = enq_ptr_value == 4'hC; // @[Counter.scala:61:40]
wire _GEN_139 = do_enq & _GEN_138; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_140 = enq_ptr_value == 4'hD; // @[Counter.scala:61:40]
wire _GEN_141 = do_enq & _GEN_140; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_142 = do_enq & wrap; // @[Counter.scala:73:24]
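  // Note: _GEN_114 .. _GEN_142 above one-hot decode enq_ptr_value into per-entry write
  // enables (entry 14's enable comes from the enqueue counter's wrap signal). The always
  // block below updates the per-entry valid bits, the two pointer counters, and the
  // maybe_full flag, and captures the enqueued uop fields into the selected entry.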
always @(posedge clock) begin // @[util.scala:458:7]
if (reset) begin // @[util.scala:458:7]
valids_0 <= 1'h0; // @[util.scala:504:26]
valids_1 <= 1'h0; // @[util.scala:504:26]
valids_2 <= 1'h0; // @[util.scala:504:26]
valids_3 <= 1'h0; // @[util.scala:504:26]
valids_4 <= 1'h0; // @[util.scala:504:26]
valids_5 <= 1'h0; // @[util.scala:504:26]
valids_6 <= 1'h0; // @[util.scala:504:26]
valids_7 <= 1'h0; // @[util.scala:504:26]
valids_8 <= 1'h0; // @[util.scala:504:26]
valids_9 <= 1'h0; // @[util.scala:504:26]
valids_10 <= 1'h0; // @[util.scala:504:26]
valids_11 <= 1'h0; // @[util.scala:504:26]
valids_12 <= 1'h0; // @[util.scala:504:26]
valids_13 <= 1'h0; // @[util.scala:504:26]
valids_14 <= 1'h0; // @[util.scala:504:26]
enq_ptr_value <= 4'h0; // @[Counter.scala:61:40]
deq_ptr_value <= 4'h0; // @[Counter.scala:61:40]
maybe_full <= 1'h0; // @[util.scala:509:29]
end
else begin // @[util.scala:458:7]
valids_0 <= ~(do_deq & deq_ptr_value == 4'h0) & (_GEN_115 | _valids_0_T_7); // @[Counter.scala:61:40]
valids_1 <= ~(do_deq & deq_ptr_value == 4'h1) & (_GEN_117 | _valids_1_T_7); // @[Counter.scala:61:40]
valids_2 <= ~(do_deq & deq_ptr_value == 4'h2) & (_GEN_119 | _valids_2_T_7); // @[Counter.scala:61:40]
valids_3 <= ~(do_deq & deq_ptr_value == 4'h3) & (_GEN_121 | _valids_3_T_7); // @[Counter.scala:61:40]
valids_4 <= ~(do_deq & deq_ptr_value == 4'h4) & (_GEN_123 | _valids_4_T_7); // @[Counter.scala:61:40]
valids_5 <= ~(do_deq & deq_ptr_value == 4'h5) & (_GEN_125 | _valids_5_T_7); // @[Counter.scala:61:40]
valids_6 <= ~(do_deq & deq_ptr_value == 4'h6) & (_GEN_127 | _valids_6_T_7); // @[Counter.scala:61:40]
valids_7 <= ~(do_deq & deq_ptr_value == 4'h7) & (_GEN_129 | _valids_7_T_7); // @[Counter.scala:61:40]
valids_8 <= ~(do_deq & deq_ptr_value == 4'h8) & (_GEN_131 | _valids_8_T_7); // @[Counter.scala:61:40]
valids_9 <= ~(do_deq & deq_ptr_value == 4'h9) & (_GEN_133 | _valids_9_T_7); // @[Counter.scala:61:40]
valids_10 <= ~(do_deq & deq_ptr_value == 4'hA) & (_GEN_135 | _valids_10_T_7); // @[Counter.scala:61:40]
valids_11 <= ~(do_deq & deq_ptr_value == 4'hB) & (_GEN_137 | _valids_11_T_7); // @[Counter.scala:61:40]
valids_12 <= ~(do_deq & deq_ptr_value == 4'hC) & (_GEN_139 | _valids_12_T_7); // @[Counter.scala:61:40]
valids_13 <= ~(do_deq & deq_ptr_value == 4'hD) & (_GEN_141 | _valids_13_T_7); // @[Counter.scala:61:40]
valids_14 <= ~(do_deq & wrap_1) & (_GEN_142 | _valids_14_T_7); // @[Counter.scala:73:24]
if (do_enq) // @[util.scala:514:26]
enq_ptr_value <= wrap ? 4'h0 : _value_T_1; // @[Counter.scala:61:40, :73:24, :77:{15,24}, :87:{20,28}]
if (do_deq) // @[util.scala:515:26]
deq_ptr_value <= wrap_1 ? 4'h0 : _value_T_3; // @[Counter.scala:61:40, :73:24, :77:{15,24}, :87:{20,28}]
if (~(do_enq == do_deq)) // @[util.scala:509:29, :514:26, :515:26, :539:{18,30}, :540:18]
maybe_full <= do_enq; // @[util.scala:509:29, :514:26]
end
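    // Note: each of the following per-entry blocks captures the incoming uop fields into
    // the entry selected by the write-enable decode above; br_mask is intentionally not
    // written here and is handled separately after each block.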
if (_GEN_115) begin // @[util.scala:520:18, :526:19, :528:35]
uops_0_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_0_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_0_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_0_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_0_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_0_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_0_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_0_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_0_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_0_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_0_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_0_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_0_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_0_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_0_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_0_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_0_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_0_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_0_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_0_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_0_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_0_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_0_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_0_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_0_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_0_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_0_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_0_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_0_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_0_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_0_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_0_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_0_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_0_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_0_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_0_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_0_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_0_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_0_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_0_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_0_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_0_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_0_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_0_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_0_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_0_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_0_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_0_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_0_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_0_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_0_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_0_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_0_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_0_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_0_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_0_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_0_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_0_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_0_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_0_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_0_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_0_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_0_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_0_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_0_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_0_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_0_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_0_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_0_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_0_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_0_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_0_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_0_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_0_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_0_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_0_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_0_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_0_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_0_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_0_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_0_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_0_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_0_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
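      // Branch-mask maintenance for slot 0 (the same two-way update is emitted for every slot below):
      // when the enqueue targets this slot, the incoming uop's br_mask is written via _uops_br_mask_T_1
      // (already pruned of branches resolving this cycle); otherwise, while the slot remains valid, its
      // mask is refreshed from _uops_0_br_mask_T_1, which in BOOM's util.scala is most likely the
      // GetNewBrMask-style clearing of freshly resolved branch tags (inference from the source locators,
      // not visible in this excerpt).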
if (do_enq & _GEN_114) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_0_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_0) // @[util.scala:504:26]
uops_0_br_mask <= _uops_0_br_mask_T_1; // @[util.scala:97:21, :505:22]
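      // The block below copies every field of the enqueued micro-op into queue slot 1 when its
      // generated per-slot write-enable (_GEN_117) is asserted; an identical field-copy block is
      // emitted once per queue entry, so slots 2 through 7 follow the same pattern further down.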
if (_GEN_117) begin // @[util.scala:520:18, :526:19, :528:35]
uops_1_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_1_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_1_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_1_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_1_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_1_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_1_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_1_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_1_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_1_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_1_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_1_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_1_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_1_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_1_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_1_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_1_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_1_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_1_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_1_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_1_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_1_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_1_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_1_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_1_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_1_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_1_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_1_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_1_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_1_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_1_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_1_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_1_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_1_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_1_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_1_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_1_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_1_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_1_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_1_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_1_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_1_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_1_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_1_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_1_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_1_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_1_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_1_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_1_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_1_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_1_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_1_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_1_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_1_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_1_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_1_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_1_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_1_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_1_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_1_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_1_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_1_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_1_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_1_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_1_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_1_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_1_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_1_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_1_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_1_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_1_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_1_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_1_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_1_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_1_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_1_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_1_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_1_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_1_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_1_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_1_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_1_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_1_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_116) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_1_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_1) // @[util.scala:504:26]
uops_1_br_mask <= _uops_1_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_119) begin // @[util.scala:520:18, :526:19, :528:35]
uops_2_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_2_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_2_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_2_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_2_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_2_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_2_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_2_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_2_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_2_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_2_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_2_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_2_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_2_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_2_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_2_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_2_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_2_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_2_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_2_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_2_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_2_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_2_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_2_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_2_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_2_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_2_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_2_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_2_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_2_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_2_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_2_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_2_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_2_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_2_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_2_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_2_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_2_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_2_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_2_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_2_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_2_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_2_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_2_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_2_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_2_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_2_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_2_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_2_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_2_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_2_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_2_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_2_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_2_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_2_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_2_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_2_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_2_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_2_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_2_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_2_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_2_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_2_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_2_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_2_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_2_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_2_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_2_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_2_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_2_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_2_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_2_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_2_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_2_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_2_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_2_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_2_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_2_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_2_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_2_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_2_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_2_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_2_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_118) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_2_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_2) // @[util.scala:504:26]
uops_2_br_mask <= _uops_2_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_121) begin // @[util.scala:520:18, :526:19, :528:35]
uops_3_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_3_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_3_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_3_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_3_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_3_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_3_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_3_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_3_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_3_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_3_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_3_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_3_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_3_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_3_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_3_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_3_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_3_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_3_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_3_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_3_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_3_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_3_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_3_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_3_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_3_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_3_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_3_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_3_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_3_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_3_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_3_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_3_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_3_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_3_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_3_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_3_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_3_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_3_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_3_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_3_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_3_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_3_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_3_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_3_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_3_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_3_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_3_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_3_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_3_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_3_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_3_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_3_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_3_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_3_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_3_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_3_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_3_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_3_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_3_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_3_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_3_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_3_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_3_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_3_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_3_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_3_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_3_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_3_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_3_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_3_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_3_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_3_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_3_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_3_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_3_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_3_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_3_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_3_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_3_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_3_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_3_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_3_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_120) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_3_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_3) // @[util.scala:504:26]
uops_3_br_mask <= _uops_3_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_123) begin // @[util.scala:520:18, :526:19, :528:35]
uops_4_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_4_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_4_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_4_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_4_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_4_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_4_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_4_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_4_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_4_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_4_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_4_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_4_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_4_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_4_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_4_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_4_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_4_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_4_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_4_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_4_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_4_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_4_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_4_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_4_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_4_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_4_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_4_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_4_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_4_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_4_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_4_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_4_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_4_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_4_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_4_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_4_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_4_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_4_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_4_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_4_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_4_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_4_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_4_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_4_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_4_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_4_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_4_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_4_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_4_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_4_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_4_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_4_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_4_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_4_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_4_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_4_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_4_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_4_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_4_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_4_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_4_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_4_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_4_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_4_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_4_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_4_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_4_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_4_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_4_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_4_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_4_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_4_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_4_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_4_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_4_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_4_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_4_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_4_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_4_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_4_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_4_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_4_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_122) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_4_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_4) // @[util.scala:504:26]
uops_4_br_mask <= _uops_4_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_125) begin // @[util.scala:520:18, :526:19, :528:35]
uops_5_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_5_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_5_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_5_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_5_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_5_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_5_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_5_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_5_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_5_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_5_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_5_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_5_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_5_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_5_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_5_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_5_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_5_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_5_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_5_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_5_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_5_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_5_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_5_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_5_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_5_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_5_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_5_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_5_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_5_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_5_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_5_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_5_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_5_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_5_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_5_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_5_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_5_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_5_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_5_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_5_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_5_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_5_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_5_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_5_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_5_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_5_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_5_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_5_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_5_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_5_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_5_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_5_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_5_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_5_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_5_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_5_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_5_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_5_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_5_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_5_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_5_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_5_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_5_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_5_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_5_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_5_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_5_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_5_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_5_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_5_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_5_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_5_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_5_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_5_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_5_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_5_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_5_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_5_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_5_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_5_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_5_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_5_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_124) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_5_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_5) // @[util.scala:504:26]
uops_5_br_mask <= _uops_5_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_127) begin // @[util.scala:520:18, :526:19, :528:35]
uops_6_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_6_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_6_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_6_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_6_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_6_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_6_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_6_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_6_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_6_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_6_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_6_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_6_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_6_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_6_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_6_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_6_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_6_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_6_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_6_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_6_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_6_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_6_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_6_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_6_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_6_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_6_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_6_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_6_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_6_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_6_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_6_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_6_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_6_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_6_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_6_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_6_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_6_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_6_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_6_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_6_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_6_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_6_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_6_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_6_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_6_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_6_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_6_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_6_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_6_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_6_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_6_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_6_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_6_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_6_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_6_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_6_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_6_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_6_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_6_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_6_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_6_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_6_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_6_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_6_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_6_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_6_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_6_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_6_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_6_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_6_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_6_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_6_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_6_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_6_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_6_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_6_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_6_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_6_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_6_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_6_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_6_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_6_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_126) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_6_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_6) // @[util.scala:504:26]
uops_6_br_mask <= _uops_6_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_129) begin // @[util.scala:520:18, :526:19, :528:35]
uops_7_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_7_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_7_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_7_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_7_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_7_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_7_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_7_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_7_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_7_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_7_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_7_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_7_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_7_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_7_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_7_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_7_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_7_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_7_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_7_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_7_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_7_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_7_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_7_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_7_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_7_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_7_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_7_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_7_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_7_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_7_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_7_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_7_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_7_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_7_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_7_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_7_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_7_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_7_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_7_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_7_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_7_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_7_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_7_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_7_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_7_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_7_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_7_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_7_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_7_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_7_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_7_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_7_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_7_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_7_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_7_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_7_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_7_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_7_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_7_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_7_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_7_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_7_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_7_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_7_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_7_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_7_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_7_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_7_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_7_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_7_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_7_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_7_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_7_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_7_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_7_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_7_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_7_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_7_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_7_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_7_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_7_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_7_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_128) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_7_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_7) // @[util.scala:504:26]
uops_7_br_mask <= _uops_7_br_mask_T_1; // @[util.scala:97:21, :505:22]
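      // Queue entry 8: capture the enqueuing micro-op's fields when the enqueue write targets this slot (_GEN_131).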
if (_GEN_131) begin // @[util.scala:520:18, :526:19, :528:35]
uops_8_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_8_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_8_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_8_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_8_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_8_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_8_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_8_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_8_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_8_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_8_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_8_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_8_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_8_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_8_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_8_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_8_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_8_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_8_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_8_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_8_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_8_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_8_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_8_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_8_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_8_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_8_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_8_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_8_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_8_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_8_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_8_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_8_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_8_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_8_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_8_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_8_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_8_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_8_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_8_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_8_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_8_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_8_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_8_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_8_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_8_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_8_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_8_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_8_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_8_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_8_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_8_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_8_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_8_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_8_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_8_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_8_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_8_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_8_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_8_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_8_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_8_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_8_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_8_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_8_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_8_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_8_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_8_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_8_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_8_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_8_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_8_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_8_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_8_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_8_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_8_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_8_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_8_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_8_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_8_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_8_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_8_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_8_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_8_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_8_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_8_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_8_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_8_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_8_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_8_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_8_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_8_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_8_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_130) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_8_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_8) // @[util.scala:504:26]
uops_8_br_mask <= _uops_8_br_mask_T_1; // @[util.scala:97:21, :505:22]
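      // Queue entry 9: capture the enqueuing micro-op's fields when the enqueue write targets this slot (_GEN_133).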
if (_GEN_133) begin // @[util.scala:520:18, :526:19, :528:35]
uops_9_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_9_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_9_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_9_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_9_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_9_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_9_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_9_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_9_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_9_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_9_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_9_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_9_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_9_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_9_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_9_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_9_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_9_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_9_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_9_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_9_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_9_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_9_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_9_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_9_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_9_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_9_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_9_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_9_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_9_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_9_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_9_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_9_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_9_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_9_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_9_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_9_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_9_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_9_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_9_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_9_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_9_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_9_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_9_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_9_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_9_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_9_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_9_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_9_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_9_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_9_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_9_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_9_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_9_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_9_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_9_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_9_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_9_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_9_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_9_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_9_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_9_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_9_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_9_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_9_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_9_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_9_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_9_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_9_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_9_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_9_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_9_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_9_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_9_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_9_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_9_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_9_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_9_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_9_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_9_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_9_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_9_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_9_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_9_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_9_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_9_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_9_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_9_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_9_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_9_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_9_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_9_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_9_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_132) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_9_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_9) // @[util.scala:504:26]
uops_9_br_mask <= _uops_9_br_mask_T_1; // @[util.scala:97:21, :505:22]
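      // Queue entry 10: capture the enqueuing micro-op's fields when the enqueue write targets this slot (_GEN_135).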
if (_GEN_135) begin // @[util.scala:520:18, :526:19, :528:35]
uops_10_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_10_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_10_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_10_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_10_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_10_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_10_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_10_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_10_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_10_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_10_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_10_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_10_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_10_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_10_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_10_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_10_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_10_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_10_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_10_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_10_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_10_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_10_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_10_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_10_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_10_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_10_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_10_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_10_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_10_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_10_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_10_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_10_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_10_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_10_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_10_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_10_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_10_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_10_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_10_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_10_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_10_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_10_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_10_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_10_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_10_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_10_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_10_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_10_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_10_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_10_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_10_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_10_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_10_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_10_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_10_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_10_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_10_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_10_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_10_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_10_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_10_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_10_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_10_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_10_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_10_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_10_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_10_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_10_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_10_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_10_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_10_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_10_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_10_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_10_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_10_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_10_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_10_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_10_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_10_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_10_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_10_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_10_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_10_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_10_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_10_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_10_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_10_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_10_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_10_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_10_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_10_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_10_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_134) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_10_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_10) // @[util.scala:504:26]
uops_10_br_mask <= _uops_10_br_mask_T_1; // @[util.scala:97:21, :505:22]
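      // Queue entry 11: capture the enqueuing micro-op's fields when the enqueue write targets this slot (_GEN_137).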
if (_GEN_137) begin // @[util.scala:520:18, :526:19, :528:35]
uops_11_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_11_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_11_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_11_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_11_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_11_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_11_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_11_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_11_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_11_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_11_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_11_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_11_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_11_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_11_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_11_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_11_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_11_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_11_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_11_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_11_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_11_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_11_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_11_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_11_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_11_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_11_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_11_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_11_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_11_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_11_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_11_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_11_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_11_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_11_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_11_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_11_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_11_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_11_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_11_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_11_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_11_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_11_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_11_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_11_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_11_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_11_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_11_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_11_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_11_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_11_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_11_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_11_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_11_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_11_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_11_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_11_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_11_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_11_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_11_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_11_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_11_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_11_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_11_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_11_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_11_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_11_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_11_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_11_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_11_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_11_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_11_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_11_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_11_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_11_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_11_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_11_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_11_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_11_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_11_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_11_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_11_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_11_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_11_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_11_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_11_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_11_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_11_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_11_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_11_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_11_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_11_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_11_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_136) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_11_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_11) // @[util.scala:504:26]
uops_11_br_mask <= _uops_11_br_mask_T_1; // @[util.scala:97:21, :505:22]
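      // Queue entry 12: capture the enqueuing micro-op's fields when the enqueue write targets this slot (_GEN_139).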
if (_GEN_139) begin // @[util.scala:520:18, :526:19, :528:35]
uops_12_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_12_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_12_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_12_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_12_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_12_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_12_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_12_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_12_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_12_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_12_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_12_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_12_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_12_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_12_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_12_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_12_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_12_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_12_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_12_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_12_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_12_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_12_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_12_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_12_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_12_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_12_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_12_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_12_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_12_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_12_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_12_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_12_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_12_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_12_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_12_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_12_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_12_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_12_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_12_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_12_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_12_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_12_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_12_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_12_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_12_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_12_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_12_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_12_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_12_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_12_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_12_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_12_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_12_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_12_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_12_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_12_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_12_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_12_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_12_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_12_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_12_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_12_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_12_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_12_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_12_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_12_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_12_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_12_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_12_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_12_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_12_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_12_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_12_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_12_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_12_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_12_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_12_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_12_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_12_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_12_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_12_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_12_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_12_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_12_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_12_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_12_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_12_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_12_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_12_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_12_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_12_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_12_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_138) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_12_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_12) // @[util.scala:504:26]
uops_12_br_mask <= _uops_12_br_mask_T_1; // @[util.scala:97:21, :505:22]
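      // Queue entry 13: capture the enqueuing micro-op's fields when the enqueue write targets this slot (_GEN_141).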
if (_GEN_141) begin // @[util.scala:520:18, :526:19, :528:35]
uops_13_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_13_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_13_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_13_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_13_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_13_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_13_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_13_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_13_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_13_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_13_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_13_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_13_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_13_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_13_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_13_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_13_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_13_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_13_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_13_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_13_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_13_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_13_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_13_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_13_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_13_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_13_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_13_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_13_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_13_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_13_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_13_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_13_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_13_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_13_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_13_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_13_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_13_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_13_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_13_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_13_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_13_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_13_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_13_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_13_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_13_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_13_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_13_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_13_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_13_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_13_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_13_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_13_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_13_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_13_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_13_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_13_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_13_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_13_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_13_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_13_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_13_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_13_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_13_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_13_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_13_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_13_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_13_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_13_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_13_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_13_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_13_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_13_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_13_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_13_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_13_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_13_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_13_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_13_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_13_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_13_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_13_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_13_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_13_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_13_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_13_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_13_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_13_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_13_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_13_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_13_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_13_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_13_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_140) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_13_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_13) // @[util.scala:504:26]
uops_13_br_mask <= _uops_13_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_142) begin // @[util.scala:520:18, :526:19, :528:35]
uops_14_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_14_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_14_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_14_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_14_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_14_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_14_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_14_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_14_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_14_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_14_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_14_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_14_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_14_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_14_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_14_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_14_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_14_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_14_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_14_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_14_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_14_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_14_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_14_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_14_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_14_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_14_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_14_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_14_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_14_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_14_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_14_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_14_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_14_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_14_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_14_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_14_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_14_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_14_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_14_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_14_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_14_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_14_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_14_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_14_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_14_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_14_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_14_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_14_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_14_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_14_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_14_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_14_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_14_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_14_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_14_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_14_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_14_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_14_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_14_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_14_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_14_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_14_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_14_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_14_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_14_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_14_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_14_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_14_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_14_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_14_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_14_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_14_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_14_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_14_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_14_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_14_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_14_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_14_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_14_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_14_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_14_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_14_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_14_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_14_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_14_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_14_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_14_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_14_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_14_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_14_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_14_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_14_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & wrap) // @[Counter.scala:73:24]
uops_14_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_14) // @[util.scala:504:26]
uops_14_br_mask <= _uops_14_br_mask_T_1; // @[util.scala:97:21, :505:22]
end // always @(posedge)
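// Payload storage: the non-uop fields of each entry (sdq_id, way_en, old_meta tag and
// coherence state, tag_match, is_hella, data, addr) are packed into a single 131-bit
// word held in this 15-entry memory. It is written at enq_ptr_value whenever do_enq is
// asserted and read combinationally at deq_ptr_value.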
ram_15x131 ram_ext ( // @[util.scala:503:22]
.R0_addr (deq_ptr_value), // @[Counter.scala:61:40]
.R0_en (1'h1),
.R0_clk (clock),
.R0_data (_ram_ext_R0_data),
.W0_addr (enq_ptr_value), // @[Counter.scala:61:40]
.W0_en (do_enq), // @[util.scala:514:26]
.W0_clk (clock),
.W0_data ({io_enq_bits_sdq_id_0, io_enq_bits_way_en_0, io_enq_bits_old_meta_tag_0, io_enq_bits_old_meta_coh_state_0, io_enq_bits_tag_match_0, io_enq_bits_is_hella_0, io_enq_bits_data_0, io_enq_bits_addr_0}) // @[util.scala:458:7, :503:22]
); // @[util.scala:503:22]
assign io_enq_ready = io_enq_ready_0; // @[util.scala:458:7]
assign io_deq_valid = io_deq_valid_0; // @[util.scala:458:7]
assign io_deq_bits_uop_inst = io_deq_bits_uop_inst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_debug_inst = io_deq_bits_uop_debug_inst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_rvc = io_deq_bits_uop_is_rvc_0; // @[util.scala:458:7]
assign io_deq_bits_uop_debug_pc = io_deq_bits_uop_debug_pc_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iq_type_0 = io_deq_bits_uop_iq_type_0_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iq_type_1 = io_deq_bits_uop_iq_type_1_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iq_type_2 = io_deq_bits_uop_iq_type_2_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iq_type_3 = io_deq_bits_uop_iq_type_3_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_0 = io_deq_bits_uop_fu_code_0_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_1 = io_deq_bits_uop_fu_code_1_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_2 = io_deq_bits_uop_fu_code_2_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_3 = io_deq_bits_uop_fu_code_3_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_4 = io_deq_bits_uop_fu_code_4_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_5 = io_deq_bits_uop_fu_code_5_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_6 = io_deq_bits_uop_fu_code_6_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_7 = io_deq_bits_uop_fu_code_7_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_8 = io_deq_bits_uop_fu_code_8_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_9 = io_deq_bits_uop_fu_code_9_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_issued = io_deq_bits_uop_iw_issued_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_issued_partial_agen = io_deq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_issued_partial_dgen = io_deq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_p1_speculative_child = io_deq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_p2_speculative_child = io_deq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_p1_bypass_hint = io_deq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_p2_bypass_hint = io_deq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_p3_bypass_hint = io_deq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7]
assign io_deq_bits_uop_dis_col_sel = io_deq_bits_uop_dis_col_sel_0; // @[util.scala:458:7]
assign io_deq_bits_uop_br_mask = io_deq_bits_uop_br_mask_0; // @[util.scala:458:7]
assign io_deq_bits_uop_br_tag = io_deq_bits_uop_br_tag_0; // @[util.scala:458:7]
assign io_deq_bits_uop_br_type = io_deq_bits_uop_br_type_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_sfb = io_deq_bits_uop_is_sfb_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_fence = io_deq_bits_uop_is_fence_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_fencei = io_deq_bits_uop_is_fencei_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_sfence = io_deq_bits_uop_is_sfence_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_amo = io_deq_bits_uop_is_amo_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_eret = io_deq_bits_uop_is_eret_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_sys_pc2epc = io_deq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_rocc = io_deq_bits_uop_is_rocc_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_mov = io_deq_bits_uop_is_mov_0; // @[util.scala:458:7]
assign io_deq_bits_uop_ftq_idx = io_deq_bits_uop_ftq_idx_0; // @[util.scala:458:7]
assign io_deq_bits_uop_edge_inst = io_deq_bits_uop_edge_inst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_pc_lob = io_deq_bits_uop_pc_lob_0; // @[util.scala:458:7]
assign io_deq_bits_uop_taken = io_deq_bits_uop_taken_0; // @[util.scala:458:7]
assign io_deq_bits_uop_imm_rename = io_deq_bits_uop_imm_rename_0; // @[util.scala:458:7]
assign io_deq_bits_uop_imm_sel = io_deq_bits_uop_imm_sel_0; // @[util.scala:458:7]
assign io_deq_bits_uop_pimm = io_deq_bits_uop_pimm_0; // @[util.scala:458:7]
assign io_deq_bits_uop_imm_packed = io_deq_bits_uop_imm_packed_0; // @[util.scala:458:7]
assign io_deq_bits_uop_op1_sel = io_deq_bits_uop_op1_sel_0; // @[util.scala:458:7]
assign io_deq_bits_uop_op2_sel = io_deq_bits_uop_op2_sel_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_ldst = io_deq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_wen = io_deq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_ren1 = io_deq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_ren2 = io_deq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_ren3 = io_deq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_swap12 = io_deq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_swap23 = io_deq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_typeTagIn = io_deq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_typeTagOut = io_deq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_fromint = io_deq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_toint = io_deq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_fastpipe = io_deq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_fma = io_deq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_div = io_deq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_sqrt = io_deq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_wflags = io_deq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_vec = io_deq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7]
assign io_deq_bits_uop_rob_idx = io_deq_bits_uop_rob_idx_0; // @[util.scala:458:7]
assign io_deq_bits_uop_ldq_idx = io_deq_bits_uop_ldq_idx_0; // @[util.scala:458:7]
assign io_deq_bits_uop_stq_idx = io_deq_bits_uop_stq_idx_0; // @[util.scala:458:7]
assign io_deq_bits_uop_rxq_idx = io_deq_bits_uop_rxq_idx_0; // @[util.scala:458:7]
assign io_deq_bits_uop_pdst = io_deq_bits_uop_pdst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_prs1 = io_deq_bits_uop_prs1_0; // @[util.scala:458:7]
assign io_deq_bits_uop_prs2 = io_deq_bits_uop_prs2_0; // @[util.scala:458:7]
assign io_deq_bits_uop_prs3 = io_deq_bits_uop_prs3_0; // @[util.scala:458:7]
assign io_deq_bits_uop_ppred = io_deq_bits_uop_ppred_0; // @[util.scala:458:7]
assign io_deq_bits_uop_prs1_busy = io_deq_bits_uop_prs1_busy_0; // @[util.scala:458:7]
assign io_deq_bits_uop_prs2_busy = io_deq_bits_uop_prs2_busy_0; // @[util.scala:458:7]
assign io_deq_bits_uop_prs3_busy = io_deq_bits_uop_prs3_busy_0; // @[util.scala:458:7]
assign io_deq_bits_uop_ppred_busy = io_deq_bits_uop_ppred_busy_0; // @[util.scala:458:7]
assign io_deq_bits_uop_stale_pdst = io_deq_bits_uop_stale_pdst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_exception = io_deq_bits_uop_exception_0; // @[util.scala:458:7]
assign io_deq_bits_uop_exc_cause = io_deq_bits_uop_exc_cause_0; // @[util.scala:458:7]
assign io_deq_bits_uop_mem_cmd = io_deq_bits_uop_mem_cmd_0; // @[util.scala:458:7]
assign io_deq_bits_uop_mem_size = io_deq_bits_uop_mem_size_0; // @[util.scala:458:7]
assign io_deq_bits_uop_mem_signed = io_deq_bits_uop_mem_signed_0; // @[util.scala:458:7]
assign io_deq_bits_uop_uses_ldq = io_deq_bits_uop_uses_ldq_0; // @[util.scala:458:7]
assign io_deq_bits_uop_uses_stq = io_deq_bits_uop_uses_stq_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_unique = io_deq_bits_uop_is_unique_0; // @[util.scala:458:7]
assign io_deq_bits_uop_flush_on_commit = io_deq_bits_uop_flush_on_commit_0; // @[util.scala:458:7]
assign io_deq_bits_uop_csr_cmd = io_deq_bits_uop_csr_cmd_0; // @[util.scala:458:7]
assign io_deq_bits_uop_ldst_is_rs1 = io_deq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7]
assign io_deq_bits_uop_ldst = io_deq_bits_uop_ldst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_lrs1 = io_deq_bits_uop_lrs1_0; // @[util.scala:458:7]
assign io_deq_bits_uop_lrs2 = io_deq_bits_uop_lrs2_0; // @[util.scala:458:7]
assign io_deq_bits_uop_lrs3 = io_deq_bits_uop_lrs3_0; // @[util.scala:458:7]
assign io_deq_bits_uop_dst_rtype = io_deq_bits_uop_dst_rtype_0; // @[util.scala:458:7]
assign io_deq_bits_uop_lrs1_rtype = io_deq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7]
assign io_deq_bits_uop_lrs2_rtype = io_deq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7]
assign io_deq_bits_uop_frs3_en = io_deq_bits_uop_frs3_en_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fcn_dw = io_deq_bits_uop_fcn_dw_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fcn_op = io_deq_bits_uop_fcn_op_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_val = io_deq_bits_uop_fp_val_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_rm = io_deq_bits_uop_fp_rm_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_typ = io_deq_bits_uop_fp_typ_0; // @[util.scala:458:7]
assign io_deq_bits_uop_xcpt_pf_if = io_deq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7]
assign io_deq_bits_uop_xcpt_ae_if = io_deq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7]
assign io_deq_bits_uop_xcpt_ma_if = io_deq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7]
assign io_deq_bits_uop_bp_debug_if = io_deq_bits_uop_bp_debug_if_0; // @[util.scala:458:7]
assign io_deq_bits_uop_bp_xcpt_if = io_deq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7]
assign io_deq_bits_uop_debug_fsrc = io_deq_bits_uop_debug_fsrc_0; // @[util.scala:458:7]
assign io_deq_bits_uop_debug_tsrc = io_deq_bits_uop_debug_tsrc_0; // @[util.scala:458:7]
assign io_deq_bits_addr = io_deq_bits_addr_0; // @[util.scala:458:7]
assign io_deq_bits_data = io_deq_bits_data_0; // @[util.scala:458:7]
assign io_deq_bits_is_hella = io_deq_bits_is_hella_0; // @[util.scala:458:7]
assign io_deq_bits_tag_match = io_deq_bits_tag_match_0; // @[util.scala:458:7]
assign io_deq_bits_old_meta_coh_state = io_deq_bits_old_meta_coh_state_0; // @[util.scala:458:7]
assign io_deq_bits_old_meta_tag = io_deq_bits_old_meta_tag_0; // @[util.scala:458:7]
assign io_deq_bits_way_en = io_deq_bits_way_en_0; // @[util.scala:458:7]
assign io_deq_bits_sdq_id = io_deq_bits_sdq_id_0; // @[util.scala:458:7]
assign io_empty = io_empty_0; // @[util.scala:458:7]
assign io_count = io_count_0; // @[util.scala:458:7]
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
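// Illustrative usage, inferred from the signature above (myNode is a hypothetical TLNode):
//   val monitored = TLMonitor(enable = true, myNode)
// With enable = false the node is passed through unchanged; with enable = true it is
// bound behind a TLEphemeralNode with monitors enabled.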
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
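// Counterpart to monAssert: still a plain assert in simulation, but when used as a formal
// property it passes the flipped monitor direction to Property, so the condition is treated
// as an assumption on the other side of the interface rather than a proof obligation.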
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
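// An (address, source) pair is visible if every client that could have issued this source ID
// lists the address in at least one of its visibility ranges.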
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
    monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
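// The legalizeMultibeat* helpers below share one pattern: latch the control fields
// (opcode, param, size, source, and address or sink/denied) on the first beat of a burst,
// then check that they remain unchanged on every subsequent beat.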
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
      monAssert (b.bits.address=== address,"'B' channel address changed with multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
    // Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
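    // With sym_source constrained to an arbitrary but stable, in-range value, the
    // pending-response tracking below checks the source-ID reuse rule for that arbitrary
    // source: an 'A' request must not reuse a source ID whose 'D' response is still
    // outstanding, and a 'D' response must match an outstanding request's source, size,
    // and opcode class.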
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
      monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
      assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
      assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
  // This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
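    // Hedged worked example (not part of the original source): the per-source opcode and
    // size bookkeeping below packs (value << 1) | 1 so that an all-zero slot means "no
    // request outstanding". For a Get (opcode 4) from source 2 with a_opcode_bus_size = 4,
    // the interm value is (4 << 1) | 1 = 9, stored at bit offset 2 << log2Ceil(4) = 8 of
    // inflight_opcodes; the D-side lookup shifts that slot back down and drops the valid
    // bit with the trailing ">> 1".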
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
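// Hedged usage sketch (not part of the original file): DecoupledHelper is typically used
// to fire a transfer only when every participating ready/valid condition holds, while
// excluding (by reference) the very signal being driven. The helper below is
// illustrative only; the signal names are assumptions, not an existing API.
object DecoupledHelperExample {
  /** Returns (in_ready, out_valid) for a single-beat pass-through stage. */
  def handshake(in_valid: Bool, out_ready: Bool, not_stalled: Bool): (Bool, Bool) = {
    val fire = DecoupledHelper(in_valid, out_ready, not_stalled)
    // in.ready must not depend on in.valid, and out.valid must not depend on out.ready,
    // so each side excludes its own condition from the AND-reduction.
    (fire.fire(in_valid), fire.fire(out_ready))
  }
}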
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
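// Hedged worked example (not part of the original file): Str packs characters MSB-first,
// so Str("ok") equals 0x6f6b.U(16.W); Str(10.U(8.W)) renders the decimal digits "10"
// left-padded with spaces to the worst-case digit count for an 8-bit value.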
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
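// Hedged usage sketch (not part of the original file): generate a byte-lane mask for a
// 4-byte-wide bus, matching the worked examples in the comment above, e.g.
// (addr_lo = 0x3, lgSize = 0) => b0001 and (addr_lo = 0x3, lgSize = 2) => b1111.
class MaskGenExample extends Module {
  val io = IO(new Bundle {
    val addr_lo = Input(UInt(2.W))
    val lgSize  = Input(UInt(2.W))
    val mask    = Output(UInt(4.W))
  })
  io.mask := MaskGen(io.addr_lo, io.lgSize, beatBytes = 4)
}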
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
  * initial block and thus accessing it from another initial block is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
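// Hedged usage sketch (not part of the original file): read a runtime knob supplied as
// +my_knob=<n> and kill simulation when a counter exceeds +my_timeout=<n>.
// The plusarg names my_knob/my_timeout are illustrative only.
class PlusArgExample extends Module {
  val io = IO(new Bundle {
    val count = Input(UInt(32.W))
    val knob  = Output(UInt(32.W))
  })
  io.knob := PlusArg("my_knob", default = 1, docstring = "example integer plusarg")
  PlusArg.timeout("my_timeout", docstring = "example cycle limit; 0 disables")(io.count)
}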
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
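  // Hedged worked examples (not part of the original file) for the extensions above:
  //   "b1011".U(4.W).sextTo(8)      // => "b11111011".U  (sign-extended)
  //   "b1011".U(4.W).padTo(8)       // => "b00001011".U  (zero-extended)
  //   "b0011".U(4.W).rotateLeft(1)  // => "b0110".U
  //   "b0011".U(4.W).rotateRight(1) // => "b1001".U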
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
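  // Hedged worked example (not part of the original file): unlike Seq.groupBy,
  // groupByIntoSeq preserves the first-appearance order of keys, which keeps generated
  // hardware deterministic across elaborations. Pure-Scala example:
  //   groupByIntoSeq(Seq(3, 1, 4, 1, 5))(_ % 2)  // => Seq((1, Seq(3, 1, 1, 5)), (0, Seq(4)))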
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
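// Hedged worked example (not part of the original file): adResponse maps an A-channel
// opcode to its expected D-channel response per the annotations above, e.g.
// adResponse(Get) selects AccessAckData and adResponse(PutFullData) selects AccessAck;
// bcResponse does the same for B-channel requests answered on C (Probe => ProbeAck).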
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on inwards path to) the global point of serialization.
 *   (B)ranch: the agent is on an outwards path from the point of serialization and may hold a read-only copy.
 *   (N)one: the agent holds no permissions on the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
  // Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
  def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
  case object IDEMPOTENT extends T // gets return the most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
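// Hedged usage sketch (not part of the original file): IdRange is half-open, so
// IdRange(4, 8) covers source IDs 4, 5, 6 and 7. The hardware contains(x: UInt)
// comparison below agrees with the software contains(x: Int) over that range.
class IdRangeExample extends Module {
  val io = IO(new Bundle {
    val source = Input(UInt(4.W))
    val hit    = Output(Bool())
  })
  io.hit := IdRange(4, 8).contains(io.source)
}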
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
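// Hedged worked example (not part of the original file): TransferSizes(4, 64) admits
// every power-of-two size from 4 to 64 bytes, so
//   TransferSizes(4, 64).contains(16)                    == true
//   TransferSizes(4, 64).intersect(TransferSizes(1, 8))  == TransferSizes(4, 8)
//   TransferSizes(4, 64).mincover(TransferSizes(1, 2))   == TransferSizes(1, 64)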
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
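// Illustrative examples (not part of the original source) of the base/mask matching
// described above (base=0x1000, mask=0xf0f matches 0x1000-0x100f, 0x1100-0x110f, ...):
//   AddressSet(0x1000, 0xf0f).contains(0x1104)  // true: 0x1104 differs from the base only in mask ("don't care") bits
//   AddressSet(0x1000, 0xf0f).contains(0x1010)  // false: bit 4 is a "care" bit and differs
//   AddressSet(0x1000, 0xf0f).alignment         // 0x10, the alignment the manager must honour
//   AddressSet(0x1000, 0xf0f).contiguous        // false, the range has holes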
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
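// Illustrative examples (not part of the original source) of the companion helpers:
//   AddressSet.misaligned(0x40, 0xC0)
//     // Seq(AddressSet(0x40, 0x3f), AddressSet(0x80, 0x7f)) -- a 192-byte region split into aligned power-of-two pieces
//   AddressSet.unify(Seq(AddressSet(0x0, 0xff), AddressSet(0x100, 0xff)))
//     // Seq(AddressSet(0x0, 0x1ff)) -- adjacent aligned sets merged by widening the mask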
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
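// Illustrative sketch (not part of the original source), assuming some DecoupledIO
// value `enq`: a BufferParams value simply selects how the channel is queued, e.g.
//   BufferParams.default(enq) // Queue(enq, 2)               -- two entries, latency 1
//   BufferParams.flow(enq)    // Queue(enq, 1, flow = true)  -- combinational bypass, latency 0
//   BufferParams.none(enq)    // enq passed through unchanged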
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
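  // Illustrative note (not part of the original source): these bit tests mirror the
  // TileLink opcode encoding. On channels A and B the data-carrying opcodes
  // (PutFullData=0, PutPartialData=1, ArithmeticData=2, LogicalData=3) all have
  // opcode(2)=0, while Get, Hint and the Acquires have opcode(2)=1; on channels C
  // and D, opcode(0) is set exactly for the *Data variants (AccessAckData,
  // ProbeAckData, ReleaseData, GrantData).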
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
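  // Illustrative note (not part of the original source): on an 8-byte-beat bus, a
  // 32-byte PutFullData (lgSize = 5) has numBeats1 = 3, so firstlastHelper reports
  // first=1, count=0 on beat 0 and last=1, done=fire, count=3 on beat 3; single-beat
  // messages report first and last on the same beat.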
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
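  // Illustrative note (not part of the original source): needT answers "does this
  // A-channel request require write (T) permission at the target?"; for example a
  // Get or an AcquireBlock with param NtoB yields false.B, while a PutFullData or an
  // AcquireBlock with param NtoT/BtoT yields true.B.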
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
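// Illustrative sketch (not part of the original source): a client typically drives
// channel A from one of the TLEdgeOut constructors above, e.g.
//   val (legal, bits) = edge.Get(fromSource = 0.U, toAddress = addr, lgSize = 3.U)
//   tl.a.valid := wantRead && legal
//   tl.a.bits  := bits
// where `edge`, `tl`, `addr` and `wantRead` are assumed to exist in the client module.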
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
module TLMonitor_7( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [28:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [6:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [28:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [6:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59]
wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14]
wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27]
wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25]
wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_37 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_39 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_43 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_45 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_49 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_51 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_55 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_57 = 1'h1; // @[Parameters.scala:57:20]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28]
wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_first_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_first_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_first_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_first_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_set_wo_ready_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_set_wo_ready_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_opcodes_set_interm_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_opcodes_set_interm_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_sizes_set_interm_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_sizes_set_interm_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_opcodes_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_opcodes_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_sizes_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_sizes_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_probe_ack_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_probe_ack_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_probe_ack_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_probe_ack_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _same_cycle_resp_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _same_cycle_resp_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _same_cycle_resp_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _same_cycle_resp_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _same_cycle_resp_WIRE_4_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _same_cycle_resp_WIRE_5_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_first_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_first_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_first_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_first_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_set_wo_ready_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_set_wo_ready_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_opcodes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_opcodes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_sizes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_sizes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_opcodes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_opcodes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_sizes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_sizes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_probe_ack_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_probe_ack_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_probe_ack_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_probe_ack_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_4_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_5_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [1026:0] _c_opcodes_set_T_1 = 1027'h0; // @[Monitor.scala:767:54]
wire [1026:0] _c_sizes_set_T_1 = 1027'h0; // @[Monitor.scala:768:52]
wire [9:0] _c_opcodes_set_T = 10'h0; // @[Monitor.scala:767:79]
wire [9:0] _c_sizes_set_T = 10'h0; // @[Monitor.scala:768:77]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51]
wire [127:0] _c_set_wo_ready_T = 128'h1; // @[OneHot.scala:58:35]
wire [127:0] _c_set_T = 128'h1; // @[OneHot.scala:58:35]
wire [259:0] c_opcodes_set = 260'h0; // @[Monitor.scala:740:34]
wire [259:0] c_sizes_set = 260'h0; // @[Monitor.scala:741:34]
wire [64:0] c_set = 65'h0; // @[Monitor.scala:738:34]
wire [64:0] c_set_wo_ready = 65'h0; // @[Monitor.scala:739:34]
wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46]
wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76]
wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [6:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_4 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_5 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
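  // A-channel source legality: the request source must match one of the client ID sets (0x00-0x0F as four 4-ID ranges, 0x10, 0x20, 0x21, or 0x40).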
wire _source_ok_T = io_in_a_bits_source_0 == 7'h10; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_1 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_7 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_13 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_19 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_2 = _source_ok_T_1 == 5'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_8 = _source_ok_T_7 == 5'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_14 = _source_ok_T_13 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_20 = _source_ok_T_19 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31]
wire _source_ok_T_25 = io_in_a_bits_source_0 == 7'h21; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_5 = _source_ok_T_25; // @[Parameters.scala:1138:31]
wire _source_ok_T_26 = io_in_a_bits_source_0 == 7'h20; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_6 = _source_ok_T_26; // @[Parameters.scala:1138:31]
wire _source_ok_T_27 = io_in_a_bits_source_0 == 7'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_7 = _source_ok_T_27; // @[Parameters.scala:1138:31]
wire _source_ok_T_28 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_29 = _source_ok_T_28 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_30 = _source_ok_T_29 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_31 = _source_ok_T_30 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_32 = _source_ok_T_31 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_33 = _source_ok_T_32 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok = _source_ok_T_33 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46]
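  // Transfer-size mask (2^size - 1 bytes), shared by the address-alignment check and the A-channel beat-count decode.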
wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [28:0] _is_aligned_T = {23'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 29'h0; // @[Edges.scala:21:{16,24}]
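  // Expected full byte-lane mask for the 8-byte data bus, expanded from the transfer size and the low address bits.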
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_4 = _uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_9 = _uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_14 = _uncommonBits_T_14[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_19 = _uncommonBits_T_19[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_24 = _uncommonBits_T_24[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_29 = _uncommonBits_T_29[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_34 = _uncommonBits_T_34[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}]
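  // D-channel source legality: the response source must match the same client ID sets as the A channel.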
wire _source_ok_T_34 = io_in_d_bits_source_0 == 7'h10; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_0 = _source_ok_T_34; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_35 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_41 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_47 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_53 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_36 = _source_ok_T_35 == 5'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_38 = _source_ok_T_36; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_40 = _source_ok_T_38; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_1 = _source_ok_T_40; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_42 = _source_ok_T_41 == 5'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_44 = _source_ok_T_42; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_46 = _source_ok_T_44; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_2 = _source_ok_T_46; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_48 = _source_ok_T_47 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_50 = _source_ok_T_48; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_52 = _source_ok_T_50; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_3 = _source_ok_T_52; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_54 = _source_ok_T_53 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_56 = _source_ok_T_54; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_58 = _source_ok_T_56; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_4 = _source_ok_T_58; // @[Parameters.scala:1138:31]
wire _source_ok_T_59 = io_in_d_bits_source_0 == 7'h21; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_5 = _source_ok_T_59; // @[Parameters.scala:1138:31]
wire _source_ok_T_60 = io_in_d_bits_source_0 == 7'h20; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_6 = _source_ok_T_60; // @[Parameters.scala:1138:31]
wire _source_ok_T_61 = io_in_d_bits_source_0 == 7'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_7 = _source_ok_T_61; // @[Parameters.scala:1138:31]
wire _source_ok_T_62 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_63 = _source_ok_T_62 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_64 = _source_ok_T_63 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_65 = _source_ok_T_64 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_66 = _source_ok_T_65 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_67 = _source_ok_T_66 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok_1 = _source_ok_T_67 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46]
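  // A-channel burst tracking: beats1 is the beat count minus one (zero for opcodes without data); a_first marks the first beat of a request.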
wire _T_1048 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_1048; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_1048; // @[Decoupled.scala:51:35]
wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
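  // Control fields of the in-progress A-channel request, saved to check that they stay stable across a multi-beat burst.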
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [6:0] source; // @[Monitor.scala:390:22]
reg [28:0] address; // @[Monitor.scala:391:22]
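  // D-channel burst tracking: responses carry data when opcode[0] is set (AccessAckData, GrantData); d_first marks the first beat.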
wire _T_1121 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_1121; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_1121; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_1121; // @[Decoupled.scala:51:35]
wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [2:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
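  // Control fields of the in-progress D-channel response, saved for the same multi-beat stability checks.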
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [6:0] source_1; // @[Monitor.scala:541:22]
reg sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
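  // Per-source bookkeeping for outstanding A=>D transactions: one inflight bit per source ID plus 4-bit opcode and size records.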
reg [64:0] inflight; // @[Monitor.scala:614:27]
reg [259:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [259:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
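  // On the first beat of an accepted A-channel request, set the inflight bit for its source and record its opcode and size.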
wire [64:0] a_set; // @[Monitor.scala:626:34]
wire [64:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [259:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [259:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [9:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [9:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [9:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [9:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [9:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [9:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [9:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [9:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [9:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [259:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [259:0] _a_opcode_lookup_T_6 = {256'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [259:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [259:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [259:0] _a_size_lookup_T_6 = {256'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}]
wire [259:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[259:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [127:0] _GEN_2 = 128'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [127:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35]
wire [127:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_974 = _T_1048 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_974 ? _a_set_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_974 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_974 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [9:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [9:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79]
wire [9:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77]
wire [1026:0] _a_opcodes_set_T_1 = {1023'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_974 ? _a_opcodes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [1026:0] _a_sizes_set_T_1 = {1023'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_974 ? _a_sizes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
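  // On the first beat of a D-channel response other than ReleaseAck, clear the matching inflight bit and its opcode/size records.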
wire [64:0] d_clr; // @[Monitor.scala:664:34]
wire [64:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [259:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [259:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_1020 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [127:0] _GEN_5 = 128'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_1020 & ~d_release_ack ? _d_clr_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_989 = _T_1121 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_989 ? _d_clr_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [1038:0] _d_opcodes_clr_T_5 = 1039'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_989 ? _d_opcodes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [1038:0] _d_sizes_clr_T_5 = 1039'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_989 ? _d_sizes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [64:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [64:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [64:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [259:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [259:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [259:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [259:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [259:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [259:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
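  // Watchdog counter used to bound how long a request may stay outstanding before the monitor flags a timeout.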
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
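  // Second inflight tracker, covering C-channel releases that await a D-channel ReleaseAck.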
reg [64:0] inflight_1; // @[Monitor.scala:726:35]
wire [64:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [259:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [259:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [259:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [259:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [259:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [259:0] _c_opcode_lookup_T_6 = {256'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [259:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [259:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [259:0] _c_size_lookup_T_6 = {256'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}]
wire [259:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[259:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [64:0] d_clr_1; // @[Monitor.scala:774:34]
wire [64:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [259:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [259:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_1092 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_1092 & d_release_ack_1 ? _d_clr_wo_ready_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1074 = _T_1121 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_1074 ? _d_clr_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [1038:0] _d_opcodes_clr_T_11 = 1039'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_1074 ? _d_opcodes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [1038:0] _d_sizes_clr_T_11 = 1039'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_1074 ? _d_sizes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 7'h0; // @[Monitor.scala:36:7, :795:113]
wire [64:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [64:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [259:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [259:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [259:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [259:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
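  // An address is visible to a source only if every client that owns that source ID declares the address within one of its visibility regions.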
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
// This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
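// Illustrative note (not part of the original checks): with this encoding a request of size 2 is
// stored per-source as (2 << 1) | 1 = 5, so an all-zero slot unambiguously means "nothing
// outstanding", and size_to_numfullbits(3.U) yields "b111".U as the per-source extraction mask.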
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
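// Both maps are indexed by the A-channel opcode (PutFullData = 0 through AcquirePerm = 7);
// e.g. a Get (opcode 4) must be answered with AccessAckData, while an AcquireBlock (opcode 6)
// may legally be answered with either Grant or GrantData.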
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
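// Illustrative usage (signal names are hypothetical): collect every condition required for a
// transaction, then drive each handshake output from all conditions except its own:
//   val helper = DecoupledHelper(in.valid, out.ready, tagFree)
//   out.valid := helper.fire(out.ready) // in.valid && tagFree
//   in.ready := helper.fire(in.valid) // out.ready && tagFree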
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
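// Illustrative usage (hypothetical values): select a pair of control signals by opcode, with a
// fallback when no key matches:
//   val (wen, accessSize) = MuxTLookup(opcode, (false.B, 0.U), Seq(
//     0.U -> (true.B, 4.U),
//     1.U -> (true.B, 8.U)))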
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
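// Illustrative example: Str("OK") elaborates to 0x4f4b.U(16.W), and Str(x, 16) renders x as
// ASCII hexadecimal digits, which is handy for printf-style debugging.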
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
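// Illustrative usage: Random(5) draws a pseudo-random index in [0, 5) from a 16-bit LFSR,
// while Random.oneHot(5) produces a one-hot encoded selection instead of a binary index.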
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
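// Illustrative example: Majority(Seq(a, b, c)) is true when at least two of the three inputs
// are true; the UInt overload votes across the individual bits of its argument.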
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
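// Illustrative example: PopCountAtLeast(x, 2) only needs to distinguish "fewer than two" from
// "two or more" set bits, which is intended to be cheaper than the full PopCount(x) >= 2.U.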
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
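// Illustrative usage (hypothetical signals): a 2-byte access (lgSize = 1) at byte offset 0x2 of
// an 8-byte beat produces the lane mask "b00001100".U:
//   val mask = MaskGen(addr(2, 0), lgSize, beatBytes = 8)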
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
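// Illustrative usage (hypothetical plusarg names):
//   val limit = PlusArg("my_counter_limit", default = 0, docstring = "Stop after N events. Off if 0.")
//   PlusArg.timeout("my_watchdog_max", docstring = "Kill simulation after N idle cycles")(idleCycles)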
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
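// Illustrative examples for the extension methods above (hypothetical values):
//   "b0110".U.rotateRight(1) // "b0011".U
//   "b0110".U.sextTo(6) // "b000110".U (the MSB of the original value is replicated)
//   3.U(2.W).addWrap(2.U, 4) // 1.U, i.e. (3 + 2) % 4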
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
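// Illustrative example: leftOR("b00100".U) yields "b11100".U and rightOR("b00100".U) yields
// "b00111".U, i.e. each set bit is smeared toward the high or low end respectively.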
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
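// Illustrative example: groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2) returns
// Seq(1 -> Seq(1, 3), 0 -> Seq(2, 4)), preserving the order in which keys were first seen.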
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A potentially empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
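// Illustrative example: IdRange(4, 8).contains(6) is true and IdRange(4, 8).contains(8) is
// false (the range is half-open); the UInt overload reduces to a prefix comparison plus
// low-bit bounds that constant propagation can often eliminate.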
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
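// Illustrative sketch (not part of the original source; sizes are hypothetical).
// As the comment above notes, mincover is not a union: the cover may admit sizes
// that neither term allowed, while intersect keeps only the shared sizes.
//   TransferSizes(1, 1) mincover TransferSizes(4, 4)   // TransferSizes(1, 4): now contains 2
//   TransferSizes(1, 1) intersect TransferSizes(4, 4)  // TransferSizes.none: nothing in common
//   TransferSizes(2, 8).contains(4)                    // true: 4 is a power of 2 within [2, 8]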
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
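// Illustrative sketch (not part of the original source; addresses are hypothetical).
// An address is in the set when its bits outside 'mask' equal 'base'; a negative
// mask therefore describes an infinite set (see 'everything' above).
//   AddressSet(0x1000, 0xfff).contains(BigInt(0x17ff)) // true: the low 12 bits are free
//   AddressSet(0x1000, 0xf0f).contains(BigInt(0x1234)) // false: bits 7..4 must equal base's (0)
//   AddressSet(0x0, 0xff).widen(0x100)                 // AddressSet(0x0, 0x1ff)
//   AddressSet.misaligned(0x1000, 0x3000)
//     // Seq(AddressSet(0x1000, 0xfff), AddressSet(0x2000, 0x1fff)): a minimal power-of-2 cover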
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
        // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
        // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
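  // Illustrative note (not part of the original source; numbers assume beatBytes = 8).
  // A 32-byte (lgSize = 5) PutFullData carries data, so numBeats1 decodes to
  // 32/8 - 1 = 3 and firstlastHelper sees four accepted beats: 'first' on the first
  // and 'last'/'done' on the fourth. A 32-byte Get carries no data on channel A, so
  // numBeats1 is 0 and it is a single-beat message ('first' and 'last' together).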
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
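  // Illustrative note (not part of the original source): needT marks A-channel
  // requests that must complete with write (T) permission. Puts, atomics, and
  // PREFETCH_WRITE hints return true; Get and PREFETCH_READ return false; for
  // Acquires the result follows the grow parameter (NtoB false, NtoT/BtoT true).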
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
module TLMonitor_2( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [1:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_b_ready, // @[Monitor.scala:20:14]
input io_in_b_valid, // @[Monitor.scala:20:14]
input [1:0] io_in_b_bits_param, // @[Monitor.scala:20:14]
input [1:0] io_in_b_bits_source, // @[Monitor.scala:20:14]
input [31:0] io_in_b_bits_address, // @[Monitor.scala:20:14]
input io_in_c_ready, // @[Monitor.scala:20:14]
input io_in_c_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_c_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_c_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_c_bits_size, // @[Monitor.scala:20:14]
input [1:0] io_in_c_bits_source, // @[Monitor.scala:20:14]
input [31:0] io_in_c_bits_address, // @[Monitor.scala:20:14]
input [63:0] io_in_c_bits_data, // @[Monitor.scala:20:14]
input io_in_c_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_e_ready, // @[Monitor.scala:20:14]
input io_in_e_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_e_bits_sink // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [3:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [1:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [31:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_b_ready_0 = io_in_b_ready; // @[Monitor.scala:36:7]
wire io_in_b_valid_0 = io_in_b_valid; // @[Monitor.scala:36:7]
wire [1:0] io_in_b_bits_param_0 = io_in_b_bits_param; // @[Monitor.scala:36:7]
wire [1:0] io_in_b_bits_source_0 = io_in_b_bits_source; // @[Monitor.scala:36:7]
wire [31:0] io_in_b_bits_address_0 = io_in_b_bits_address; // @[Monitor.scala:36:7]
wire io_in_c_ready_0 = io_in_c_ready; // @[Monitor.scala:36:7]
wire io_in_c_valid_0 = io_in_c_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_c_bits_opcode_0 = io_in_c_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_c_bits_param_0 = io_in_c_bits_param; // @[Monitor.scala:36:7]
wire [3:0] io_in_c_bits_size_0 = io_in_c_bits_size; // @[Monitor.scala:36:7]
wire [1:0] io_in_c_bits_source_0 = io_in_c_bits_source; // @[Monitor.scala:36:7]
wire [31:0] io_in_c_bits_address_0 = io_in_c_bits_address; // @[Monitor.scala:36:7]
wire [63:0] io_in_c_bits_data_0 = io_in_c_bits_data; // @[Monitor.scala:36:7]
wire io_in_c_bits_corrupt_0 = io_in_c_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7]
wire [3:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_e_ready_0 = io_in_e_ready; // @[Monitor.scala:36:7]
wire io_in_e_valid_0 = io_in_e_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_e_bits_sink_0 = io_in_e_bits_sink; // @[Monitor.scala:36:7]
wire sink_ok = 1'h1; // @[Monitor.scala:309:31]
wire mask_sub_sub_sub_0_1_1 = 1'h1; // @[Misc.scala:206:21]
wire mask_sub_sub_size_1 = 1'h1; // @[Misc.scala:209:26]
wire mask_sub_sub_0_1_1 = 1'h1; // @[Misc.scala:215:29]
wire mask_sub_sub_1_1_1 = 1'h1; // @[Misc.scala:215:29]
wire mask_sub_0_1_1 = 1'h1; // @[Misc.scala:215:29]
wire mask_sub_1_1_1 = 1'h1; // @[Misc.scala:215:29]
wire mask_sub_2_1_1 = 1'h1; // @[Misc.scala:215:29]
wire mask_sub_3_1_1 = 1'h1; // @[Misc.scala:215:29]
wire mask_size_1 = 1'h1; // @[Misc.scala:209:26]
wire mask_acc_8 = 1'h1; // @[Misc.scala:215:29]
wire mask_acc_9 = 1'h1; // @[Misc.scala:215:29]
wire mask_acc_10 = 1'h1; // @[Misc.scala:215:29]
wire mask_acc_11 = 1'h1; // @[Misc.scala:215:29]
wire mask_acc_12 = 1'h1; // @[Misc.scala:215:29]
wire mask_acc_13 = 1'h1; // @[Misc.scala:215:29]
wire mask_acc_14 = 1'h1; // @[Misc.scala:215:29]
wire mask_acc_15 = 1'h1; // @[Misc.scala:215:29]
wire sink_ok_1 = 1'h1; // @[Monitor.scala:367:31]
wire _b_first_beats1_opdata_T = 1'h1; // @[Edges.scala:97:37]
wire _b_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire b_first_last = 1'h1; // @[Edges.scala:232:33]
wire [3:0] io_in_b_bits_size = 4'h6; // @[Monitor.scala:36:7]
wire [3:0] _mask_sizeOH_T_3 = 4'h6; // @[Misc.scala:202:34]
wire [2:0] io_in_b_bits_opcode = 3'h6; // @[Monitor.scala:36:7]
wire [7:0] io_in_b_bits_mask = 8'hFF; // @[Monitor.scala:36:7]
wire [7:0] mask_1 = 8'hFF; // @[Misc.scala:222:10]
wire [63:0] io_in_b_bits_data = 64'h0; // @[Monitor.scala:36:7]
wire io_in_b_bits_corrupt = 1'h0; // @[Monitor.scala:36:7]
wire mask_sub_size_1 = 1'h0; // @[Misc.scala:209:26]
wire _mask_sub_acc_T_4 = 1'h0; // @[Misc.scala:215:38]
wire _mask_sub_acc_T_5 = 1'h0; // @[Misc.scala:215:38]
wire _mask_sub_acc_T_6 = 1'h0; // @[Misc.scala:215:38]
wire _mask_sub_acc_T_7 = 1'h0; // @[Misc.scala:215:38]
wire _legal_source_T_3 = 1'h0; // @[Mux.scala:30:73]
wire b_first_beats1_opdata = 1'h0; // @[Edges.scala:97:28]
wire [15:0] _a_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hFF; // @[Monitor.scala:612:57]
wire [15:0] _c_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hFF; // @[Monitor.scala:724:57]
wire [16:0] _a_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hFF; // @[Monitor.scala:612:57]
wire [16:0] _c_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hFF; // @[Monitor.scala:724:57]
wire [15:0] _a_size_lookup_T_3 = 16'h100; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h100; // @[Monitor.scala:612:51]
wire [15:0] _c_size_lookup_T_3 = 16'h100; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h100; // @[Monitor.scala:724:51]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [3:0] _mask_sizeOH_T_4 = 4'h4; // @[OneHot.scala:65:12]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [2:0] _mask_sizeOH_T_5 = 3'h4; // @[OneHot.scala:65:27]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] mask_sizeOH_1 = 3'h5; // @[Misc.scala:202:81]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [8:0] b_first_beats1 = 9'h0; // @[Edges.scala:221:14]
wire [8:0] b_first_count = 9'h0; // @[Edges.scala:234:25]
wire [8:0] b_first_beats1_decode = 9'h7; // @[Edges.scala:220:59]
wire [11:0] is_aligned_mask_1 = 12'h3F; // @[package.scala:243:46]
wire [11:0] _b_first_beats1_decode_T_2 = 12'h3F; // @[package.scala:243:46]
wire [11:0] _is_aligned_mask_T_3 = 12'hFC0; // @[package.scala:243:76]
wire [11:0] _b_first_beats1_decode_T_1 = 12'hFC0; // @[package.scala:243:76]
wire [26:0] _is_aligned_mask_T_2 = 27'h3FFC0; // @[package.scala:243:71]
wire [26:0] _b_first_beats1_decode_T = 27'h3FFC0; // @[package.scala:243:71]
wire [3:0] mask_lo_1 = 4'hF; // @[Misc.scala:222:10]
wire [3:0] mask_hi_1 = 4'hF; // @[Misc.scala:222:10]
wire [1:0] mask_lo_lo_1 = 2'h3; // @[Misc.scala:222:10]
wire [1:0] mask_lo_hi_1 = 2'h3; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo_1 = 2'h3; // @[Misc.scala:222:10]
wire [1:0] mask_hi_hi_1 = 2'h3; // @[Misc.scala:222:10]
wire [1:0] mask_sizeOH_shiftAmount_1 = 2'h2; // @[OneHot.scala:64:49]
wire [3:0] _a_size_lookup_T_2 = 4'h8; // @[Monitor.scala:641:117]
wire [3:0] _d_sizes_clr_T = 4'h8; // @[Monitor.scala:681:48]
wire [3:0] _c_size_lookup_T_2 = 4'h8; // @[Monitor.scala:750:119]
wire [3:0] _d_sizes_clr_T_6 = 4'h8; // @[Monitor.scala:791:48]
wire [3:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [31:0] _address_ok_T = io_in_b_bits_address_0; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_70 = io_in_c_bits_address_0; // @[Monitor.scala:36:7]
wire _source_ok_T = io_in_a_bits_source_0 == 2'h0; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31]
wire _source_ok_T_1 = io_in_a_bits_source_0 == 2'h1; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1 = _source_ok_T_1; // @[Parameters.scala:1138:31]
wire _source_ok_T_2 = io_in_a_bits_source_0 == 2'h2; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_2 = _source_ok_T_2; // @[Parameters.scala:1138:31]
wire _source_ok_T_3 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok = _source_ok_T_3 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46]
wire [26:0] _GEN = 27'hFFF << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [26:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [26:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [26:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [11:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [31:0] _is_aligned_T = {20'h0, io_in_a_bits_address_0[11:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 32'h0; // @[Edges.scala:21:{16,24}]
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 4'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire _source_ok_T_4 = io_in_d_bits_source_0 == 2'h0; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_0 = _source_ok_T_4; // @[Parameters.scala:1138:31]
wire _source_ok_T_5 = io_in_d_bits_source_0 == 2'h1; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_1 = _source_ok_T_5; // @[Parameters.scala:1138:31]
wire _source_ok_T_6 = io_in_d_bits_source_0 == 2'h2; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_2 = _source_ok_T_6; // @[Parameters.scala:1138:31]
wire _source_ok_T_7 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok_1 = _source_ok_T_7 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46]
wire _legal_source_T = io_in_b_bits_source_0 == 2'h0; // @[Monitor.scala:36:7]
wire _legal_source_T_1 = io_in_b_bits_source_0 == 2'h1; // @[Monitor.scala:36:7]
wire _legal_source_T_2 = io_in_b_bits_source_0 == 2'h2; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_1 = {1'h0, _address_ok_T}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_2 = _address_ok_T_1 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_3 = _address_ok_T_2; // @[Parameters.scala:137:46]
wire _address_ok_T_4 = _address_ok_T_3 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_0 = _address_ok_T_4; // @[Parameters.scala:612:40]
wire [31:0] _address_ok_T_5 = {io_in_b_bits_address_0[31:13], io_in_b_bits_address_0[12:0] ^ 13'h1000}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_6 = {1'h0, _address_ok_T_5}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_7 = _address_ok_T_6 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_8 = _address_ok_T_7; // @[Parameters.scala:137:46]
wire _address_ok_T_9 = _address_ok_T_8 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1 = _address_ok_T_9; // @[Parameters.scala:612:40]
wire [13:0] _GEN_0 = io_in_b_bits_address_0[13:0] ^ 14'h3000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_10 = {io_in_b_bits_address_0[31:14], _GEN_0}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_11 = {1'h0, _address_ok_T_10}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_12 = _address_ok_T_11 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_13 = _address_ok_T_12; // @[Parameters.scala:137:46]
wire _address_ok_T_14 = _address_ok_T_13 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_2 = _address_ok_T_14; // @[Parameters.scala:612:40]
wire [16:0] _GEN_1 = io_in_b_bits_address_0[16:0] ^ 17'h10000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_15 = {io_in_b_bits_address_0[31:17], _GEN_1}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_16 = {1'h0, _address_ok_T_15}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_17 = _address_ok_T_16 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_18 = _address_ok_T_17; // @[Parameters.scala:137:46]
wire _address_ok_T_19 = _address_ok_T_18 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_3 = _address_ok_T_19; // @[Parameters.scala:612:40]
wire [20:0] _GEN_2 = io_in_b_bits_address_0[20:0] ^ 21'h100000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_20 = {io_in_b_bits_address_0[31:21], _GEN_2}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_21 = {1'h0, _address_ok_T_20}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_22 = _address_ok_T_21 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_23 = _address_ok_T_22; // @[Parameters.scala:137:46]
wire _address_ok_T_24 = _address_ok_T_23 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_4 = _address_ok_T_24; // @[Parameters.scala:612:40]
wire [31:0] _address_ok_T_25 = {io_in_b_bits_address_0[31:21], io_in_b_bits_address_0[20:0] ^ 21'h110000}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_26 = {1'h0, _address_ok_T_25}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_27 = _address_ok_T_26 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_28 = _address_ok_T_27; // @[Parameters.scala:137:46]
wire _address_ok_T_29 = _address_ok_T_28 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_5 = _address_ok_T_29; // @[Parameters.scala:612:40]
wire [25:0] _GEN_3 = io_in_b_bits_address_0[25:0] ^ 26'h2000000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_30 = {io_in_b_bits_address_0[31:26], _GEN_3}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_31 = {1'h0, _address_ok_T_30}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_32 = _address_ok_T_31 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_33 = _address_ok_T_32; // @[Parameters.scala:137:46]
wire _address_ok_T_34 = _address_ok_T_33 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_6 = _address_ok_T_34; // @[Parameters.scala:612:40]
wire [25:0] _GEN_4 = io_in_b_bits_address_0[25:0] ^ 26'h2010000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_35 = {io_in_b_bits_address_0[31:26], _GEN_4}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_36 = {1'h0, _address_ok_T_35}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_37 = _address_ok_T_36 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_38 = _address_ok_T_37; // @[Parameters.scala:137:46]
wire _address_ok_T_39 = _address_ok_T_38 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_7 = _address_ok_T_39; // @[Parameters.scala:612:40]
wire [27:0] _GEN_5 = io_in_b_bits_address_0[27:0] ^ 28'h8000000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_40 = {io_in_b_bits_address_0[31:28], _GEN_5}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_41 = {1'h0, _address_ok_T_40}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_42 = _address_ok_T_41 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_43 = _address_ok_T_42; // @[Parameters.scala:137:46]
wire _address_ok_T_44 = _address_ok_T_43 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_8 = _address_ok_T_44; // @[Parameters.scala:612:40]
wire [27:0] _GEN_6 = io_in_b_bits_address_0[27:0] ^ 28'hC000000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_45 = {io_in_b_bits_address_0[31:28], _GEN_6}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_46 = {1'h0, _address_ok_T_45}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_47 = _address_ok_T_46 & 33'h1FC000000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_48 = _address_ok_T_47; // @[Parameters.scala:137:46]
wire _address_ok_T_49 = _address_ok_T_48 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_9 = _address_ok_T_49; // @[Parameters.scala:612:40]
wire [28:0] _GEN_7 = io_in_b_bits_address_0[28:0] ^ 29'h10020000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_50 = {io_in_b_bits_address_0[31:29], _GEN_7}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_51 = {1'h0, _address_ok_T_50}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_52 = _address_ok_T_51 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_53 = _address_ok_T_52; // @[Parameters.scala:137:46]
wire _address_ok_T_54 = _address_ok_T_53 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_10 = _address_ok_T_54; // @[Parameters.scala:612:40]
wire [31:0] _address_ok_T_55 = io_in_b_bits_address_0 ^ 32'h80000000; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_56 = {1'h0, _address_ok_T_55}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_57 = _address_ok_T_56 & 33'h1F0000000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_58 = _address_ok_T_57; // @[Parameters.scala:137:46]
wire _address_ok_T_59 = _address_ok_T_58 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_11 = _address_ok_T_59; // @[Parameters.scala:612:40]
wire _address_ok_T_60 = _address_ok_WIRE_0 | _address_ok_WIRE_1; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_61 = _address_ok_T_60 | _address_ok_WIRE_2; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_62 = _address_ok_T_61 | _address_ok_WIRE_3; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_63 = _address_ok_T_62 | _address_ok_WIRE_4; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_64 = _address_ok_T_63 | _address_ok_WIRE_5; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_65 = _address_ok_T_64 | _address_ok_WIRE_6; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_66 = _address_ok_T_65 | _address_ok_WIRE_7; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_67 = _address_ok_T_66 | _address_ok_WIRE_8; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_68 = _address_ok_T_67 | _address_ok_WIRE_9; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_69 = _address_ok_T_68 | _address_ok_WIRE_10; // @[Parameters.scala:612:40, :636:64]
wire address_ok = _address_ok_T_69 | _address_ok_WIRE_11; // @[Parameters.scala:612:40, :636:64]
wire [31:0] _is_aligned_T_1 = {26'h0, io_in_b_bits_address_0[5:0]}; // @[Monitor.scala:36:7]
wire is_aligned_1 = _is_aligned_T_1 == 32'h0; // @[Edges.scala:21:{16,24}]
wire mask_sub_sub_bit_1 = io_in_b_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2_1 = mask_sub_sub_bit_1; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit_1 = ~mask_sub_sub_bit_1; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2_1 = mask_sub_sub_nbit_1; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T_2 = mask_sub_sub_0_2_1; // @[Misc.scala:214:27, :215:38]
wire _mask_sub_sub_acc_T_3 = mask_sub_sub_1_2_1; // @[Misc.scala:214:27, :215:38]
wire mask_sub_bit_1 = io_in_b_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit_1 = ~mask_sub_bit_1; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2_1 = mask_sub_sub_0_2_1 & mask_sub_nbit_1; // @[Misc.scala:211:20, :214:27]
wire mask_sub_1_2_1 = mask_sub_sub_0_2_1 & mask_sub_bit_1; // @[Misc.scala:210:26, :214:27]
wire mask_sub_2_2_1 = mask_sub_sub_1_2_1 & mask_sub_nbit_1; // @[Misc.scala:211:20, :214:27]
wire mask_sub_3_2_1 = mask_sub_sub_1_2_1 & mask_sub_bit_1; // @[Misc.scala:210:26, :214:27]
wire mask_bit_1 = io_in_b_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit_1 = ~mask_bit_1; // @[Misc.scala:210:26, :211:20]
wire mask_eq_8 = mask_sub_0_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_8 = mask_eq_8; // @[Misc.scala:214:27, :215:38]
wire mask_eq_9 = mask_sub_0_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_9 = mask_eq_9; // @[Misc.scala:214:27, :215:38]
wire mask_eq_10 = mask_sub_1_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_10 = mask_eq_10; // @[Misc.scala:214:27, :215:38]
wire mask_eq_11 = mask_sub_1_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_11 = mask_eq_11; // @[Misc.scala:214:27, :215:38]
wire mask_eq_12 = mask_sub_2_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_12 = mask_eq_12; // @[Misc.scala:214:27, :215:38]
wire mask_eq_13 = mask_sub_2_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_13 = mask_eq_13; // @[Misc.scala:214:27, :215:38]
wire mask_eq_14 = mask_sub_3_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_14 = mask_eq_14; // @[Misc.scala:214:27, :215:38]
wire mask_eq_15 = mask_sub_3_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_15 = mask_eq_15; // @[Misc.scala:214:27, :215:38]
wire _legal_source_WIRE_0 = _legal_source_T; // @[Parameters.scala:1138:31]
wire _legal_source_WIRE_1 = _legal_source_T_1; // @[Parameters.scala:1138:31]
wire _legal_source_WIRE_2 = _legal_source_T_2; // @[Parameters.scala:1138:31]
wire _legal_source_T_4 = _legal_source_WIRE_1; // @[Mux.scala:30:73]
wire _legal_source_T_6 = _legal_source_T_4; // @[Mux.scala:30:73]
wire [1:0] _legal_source_T_5 = {_legal_source_WIRE_2, 1'h0}; // @[Mux.scala:30:73]
wire [1:0] _legal_source_T_7 = {1'h0, _legal_source_T_6} | _legal_source_T_5; // @[Mux.scala:30:73]
wire [1:0] _legal_source_WIRE_1_0 = _legal_source_T_7; // @[Mux.scala:30:73]
wire legal_source = _legal_source_WIRE_1_0 == io_in_b_bits_source_0; // @[Mux.scala:30:73]
wire _source_ok_T_8 = io_in_c_bits_source_0 == 2'h0; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_2_0 = _source_ok_T_8; // @[Parameters.scala:1138:31]
wire _source_ok_T_9 = io_in_c_bits_source_0 == 2'h1; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_2_1 = _source_ok_T_9; // @[Parameters.scala:1138:31]
wire _source_ok_T_10 = io_in_c_bits_source_0 == 2'h2; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_2_2 = _source_ok_T_10; // @[Parameters.scala:1138:31]
wire _source_ok_T_11 = _source_ok_WIRE_2_0 | _source_ok_WIRE_2_1; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok_2 = _source_ok_T_11 | _source_ok_WIRE_2_2; // @[Parameters.scala:1138:31, :1139:46]
wire [26:0] _GEN_8 = 27'hFFF << io_in_c_bits_size_0; // @[package.scala:243:71]
wire [26:0] _is_aligned_mask_T_4; // @[package.scala:243:71]
assign _is_aligned_mask_T_4 = _GEN_8; // @[package.scala:243:71]
wire [26:0] _c_first_beats1_decode_T; // @[package.scala:243:71]
assign _c_first_beats1_decode_T = _GEN_8; // @[package.scala:243:71]
wire [26:0] _c_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _c_first_beats1_decode_T_3 = _GEN_8; // @[package.scala:243:71]
wire [11:0] _is_aligned_mask_T_5 = _is_aligned_mask_T_4[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] is_aligned_mask_2 = ~_is_aligned_mask_T_5; // @[package.scala:243:{46,76}]
wire [31:0] _is_aligned_T_2 = {20'h0, io_in_c_bits_address_0[11:0] & is_aligned_mask_2}; // @[package.scala:243:46]
wire is_aligned_2 = _is_aligned_T_2 == 32'h0; // @[Edges.scala:21:{16,24}]
wire [32:0] _address_ok_T_71 = {1'h0, _address_ok_T_70}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_72 = _address_ok_T_71 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_73 = _address_ok_T_72; // @[Parameters.scala:137:46]
wire _address_ok_T_74 = _address_ok_T_73 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_0 = _address_ok_T_74; // @[Parameters.scala:612:40]
wire [31:0] _address_ok_T_75 = {io_in_c_bits_address_0[31:13], io_in_c_bits_address_0[12:0] ^ 13'h1000}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_76 = {1'h0, _address_ok_T_75}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_77 = _address_ok_T_76 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_78 = _address_ok_T_77; // @[Parameters.scala:137:46]
wire _address_ok_T_79 = _address_ok_T_78 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_1 = _address_ok_T_79; // @[Parameters.scala:612:40]
wire [13:0] _GEN_9 = io_in_c_bits_address_0[13:0] ^ 14'h3000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_80 = {io_in_c_bits_address_0[31:14], _GEN_9}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_81 = {1'h0, _address_ok_T_80}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_82 = _address_ok_T_81 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_83 = _address_ok_T_82; // @[Parameters.scala:137:46]
wire _address_ok_T_84 = _address_ok_T_83 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_2 = _address_ok_T_84; // @[Parameters.scala:612:40]
wire [16:0] _GEN_10 = io_in_c_bits_address_0[16:0] ^ 17'h10000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_85 = {io_in_c_bits_address_0[31:17], _GEN_10}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_86 = {1'h0, _address_ok_T_85}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_87 = _address_ok_T_86 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_88 = _address_ok_T_87; // @[Parameters.scala:137:46]
wire _address_ok_T_89 = _address_ok_T_88 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_3 = _address_ok_T_89; // @[Parameters.scala:612:40]
wire [20:0] _GEN_11 = io_in_c_bits_address_0[20:0] ^ 21'h100000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_90 = {io_in_c_bits_address_0[31:21], _GEN_11}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_91 = {1'h0, _address_ok_T_90}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_92 = _address_ok_T_91 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_93 = _address_ok_T_92; // @[Parameters.scala:137:46]
wire _address_ok_T_94 = _address_ok_T_93 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_4 = _address_ok_T_94; // @[Parameters.scala:612:40]
wire [31:0] _address_ok_T_95 = {io_in_c_bits_address_0[31:21], io_in_c_bits_address_0[20:0] ^ 21'h110000}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_96 = {1'h0, _address_ok_T_95}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_97 = _address_ok_T_96 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_98 = _address_ok_T_97; // @[Parameters.scala:137:46]
wire _address_ok_T_99 = _address_ok_T_98 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_5 = _address_ok_T_99; // @[Parameters.scala:612:40]
wire [25:0] _GEN_12 = io_in_c_bits_address_0[25:0] ^ 26'h2000000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_100 = {io_in_c_bits_address_0[31:26], _GEN_12}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_101 = {1'h0, _address_ok_T_100}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_102 = _address_ok_T_101 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_103 = _address_ok_T_102; // @[Parameters.scala:137:46]
wire _address_ok_T_104 = _address_ok_T_103 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_6 = _address_ok_T_104; // @[Parameters.scala:612:40]
wire [25:0] _GEN_13 = io_in_c_bits_address_0[25:0] ^ 26'h2010000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_105 = {io_in_c_bits_address_0[31:26], _GEN_13}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_106 = {1'h0, _address_ok_T_105}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_107 = _address_ok_T_106 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_108 = _address_ok_T_107; // @[Parameters.scala:137:46]
wire _address_ok_T_109 = _address_ok_T_108 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_7 = _address_ok_T_109; // @[Parameters.scala:612:40]
wire [27:0] _GEN_14 = io_in_c_bits_address_0[27:0] ^ 28'h8000000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_110 = {io_in_c_bits_address_0[31:28], _GEN_14}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_111 = {1'h0, _address_ok_T_110}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_112 = _address_ok_T_111 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_113 = _address_ok_T_112; // @[Parameters.scala:137:46]
wire _address_ok_T_114 = _address_ok_T_113 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_8 = _address_ok_T_114; // @[Parameters.scala:612:40]
wire [27:0] _GEN_15 = io_in_c_bits_address_0[27:0] ^ 28'hC000000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_115 = {io_in_c_bits_address_0[31:28], _GEN_15}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_116 = {1'h0, _address_ok_T_115}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_117 = _address_ok_T_116 & 33'h1FC000000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_118 = _address_ok_T_117; // @[Parameters.scala:137:46]
wire _address_ok_T_119 = _address_ok_T_118 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_9 = _address_ok_T_119; // @[Parameters.scala:612:40]
wire [28:0] _GEN_16 = io_in_c_bits_address_0[28:0] ^ 29'h10020000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_120 = {io_in_c_bits_address_0[31:29], _GEN_16}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_121 = {1'h0, _address_ok_T_120}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_122 = _address_ok_T_121 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_123 = _address_ok_T_122; // @[Parameters.scala:137:46]
wire _address_ok_T_124 = _address_ok_T_123 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_10 = _address_ok_T_124; // @[Parameters.scala:612:40]
wire [31:0] _address_ok_T_125 = io_in_c_bits_address_0 ^ 32'h80000000; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_126 = {1'h0, _address_ok_T_125}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_127 = _address_ok_T_126 & 33'h1F0000000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_128 = _address_ok_T_127; // @[Parameters.scala:137:46]
wire _address_ok_T_129 = _address_ok_T_128 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_11 = _address_ok_T_129; // @[Parameters.scala:612:40]
wire _address_ok_T_130 = _address_ok_WIRE_1_0 | _address_ok_WIRE_1_1; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_131 = _address_ok_T_130 | _address_ok_WIRE_1_2; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_132 = _address_ok_T_131 | _address_ok_WIRE_1_3; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_133 = _address_ok_T_132 | _address_ok_WIRE_1_4; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_134 = _address_ok_T_133 | _address_ok_WIRE_1_5; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_135 = _address_ok_T_134 | _address_ok_WIRE_1_6; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_136 = _address_ok_T_135 | _address_ok_WIRE_1_7; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_137 = _address_ok_T_136 | _address_ok_WIRE_1_8; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_138 = _address_ok_T_137 | _address_ok_WIRE_1_9; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_139 = _address_ok_T_138 | _address_ok_WIRE_1_10; // @[Parameters.scala:612:40, :636:64]
wire address_ok_1 = _address_ok_T_139 | _address_ok_WIRE_1_11; // @[Parameters.scala:612:40, :636:64]
wire _T_2451 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_2451; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_2451; // @[Decoupled.scala:51:35]
wire [11:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [8:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [8:0] a_first_counter; // @[Edges.scala:229:27]
wire [9:0] _a_first_counter1_T = {1'h0, a_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] a_first_counter1 = _a_first_counter1_T[8:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [8:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [3:0] size; // @[Monitor.scala:389:22]
reg [1:0] source; // @[Monitor.scala:390:22]
reg [31:0] address; // @[Monitor.scala:391:22]
wire _T_2525 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_2525; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_2525; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_2525; // @[Decoupled.scala:51:35]
wire _d_first_T_3; // @[Decoupled.scala:51:35]
assign _d_first_T_3 = _T_2525; // @[Decoupled.scala:51:35]
wire [26:0] _GEN_17 = 27'hFFF << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_17; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_17; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_17; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T_9; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_9 = _GEN_17; // @[package.scala:243:71]
wire [11:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_3 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [8:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T = {1'h0, d_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1 = _d_first_counter1_T[8:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [3:0] size_1; // @[Monitor.scala:540:22]
reg [1:0] source_1; // @[Monitor.scala:541:22]
reg [2:0] sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
wire _b_first_T = io_in_b_ready_0 & io_in_b_valid_0; // @[Decoupled.scala:51:35]
wire b_first_done = _b_first_T; // @[Decoupled.scala:51:35]
reg [8:0] b_first_counter; // @[Edges.scala:229:27]
wire [9:0] _b_first_counter1_T = {1'h0, b_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] b_first_counter1 = _b_first_counter1_T[8:0]; // @[Edges.scala:230:28]
wire b_first = b_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _b_first_last_T = b_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire [8:0] _b_first_count_T = ~b_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] _b_first_counter_T = b_first ? 9'h0 : b_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21]
reg [1:0] param_2; // @[Monitor.scala:411:22]
reg [1:0] source_2; // @[Monitor.scala:413:22]
reg [31:0] address_1; // @[Monitor.scala:414:22]
wire _T_2522 = io_in_c_ready_0 & io_in_c_valid_0; // @[Decoupled.scala:51:35]
wire _c_first_T; // @[Decoupled.scala:51:35]
assign _c_first_T = _T_2522; // @[Decoupled.scala:51:35]
wire _c_first_T_1; // @[Decoupled.scala:51:35]
assign _c_first_T_1 = _T_2522; // @[Decoupled.scala:51:35]
wire [11:0] _c_first_beats1_decode_T_1 = _c_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _c_first_beats1_decode_T_2 = ~_c_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] c_first_beats1_decode = _c_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire c_first_beats1_opdata = io_in_c_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire c_first_beats1_opdata_1 = io_in_c_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [8:0] c_first_beats1 = c_first_beats1_opdata ? c_first_beats1_decode : 9'h0; // @[Edges.scala:102:36, :220:59, :221:14]
reg [8:0] c_first_counter; // @[Edges.scala:229:27]
wire [9:0] _c_first_counter1_T = {1'h0, c_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] c_first_counter1 = _c_first_counter1_T[8:0]; // @[Edges.scala:230:28]
wire c_first = c_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _c_first_last_T = c_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _c_first_last_T_1 = c_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire c_first_last = _c_first_last_T | _c_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire c_first_done = c_first_last & _c_first_T; // @[Decoupled.scala:51:35]
wire [8:0] _c_first_count_T = ~c_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] c_first_count = c_first_beats1 & _c_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _c_first_counter_T = c_first ? c_first_beats1 : c_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_3; // @[Monitor.scala:515:22]
reg [2:0] param_3; // @[Monitor.scala:516:22]
reg [3:0] size_3; // @[Monitor.scala:517:22]
reg [1:0] source_3; // @[Monitor.scala:518:22]
reg [31:0] address_2; // @[Monitor.scala:519:22]
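// Editorial note: the registers below book-keep outstanding A->D transactions per source ID
// (3 source IDs; 4 opcode bits and 8 size bits reserved per source).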
reg [2:0] inflight; // @[Monitor.scala:614:27]
reg [11:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [23:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [11:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [8:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [8:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [8:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [9:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] a_first_counter1_1 = _a_first_counter1_T_1[8:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [8:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [8:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [11:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46]
wire [8:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1_1 = _d_first_counter1_T_1[8:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [2:0] a_set; // @[Monitor.scala:626:34]
wire [2:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [11:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [23:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [4:0] _GEN_18 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [4:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_18; // @[Monitor.scala:637:69]
wire [4:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_18; // @[Monitor.scala:637:69, :680:101]
wire [4:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_18; // @[Monitor.scala:637:69, :749:69]
wire [4:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_18; // @[Monitor.scala:637:69, :790:101]
wire [11:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [15:0] _a_opcode_lookup_T_6 = {4'h0, _a_opcode_lookup_T_1 & 12'hF}; // @[Monitor.scala:637:{44,97}]
wire [15:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[15:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [7:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [4:0] _GEN_19 = {io_in_d_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :641:65]
wire [4:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_19; // @[Monitor.scala:641:65]
wire [4:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_19; // @[Monitor.scala:641:65, :681:99]
wire [4:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_19; // @[Monitor.scala:641:65, :750:67]
wire [4:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_19; // @[Monitor.scala:641:65, :791:99]
wire [23:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [23:0] _a_size_lookup_T_6 = {16'h0, _a_size_lookup_T_1[7:0]}; // @[Monitor.scala:641:{40,91}]
wire [23:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[23:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[7:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [4:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [3:0] _GEN_20 = 4'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [3:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_20; // @[OneHot.scala:58:35]
wire [3:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_20; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[2:0] : 3'h0; // @[OneHot.scala:58:35]
wire _T_2377 = _T_2451 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_2377 ? _a_set_T[2:0] : 3'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_2377 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [4:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [4:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[4:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_2377 ? _a_sizes_set_interm_T_1 : 5'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [4:0] _a_opcodes_set_T = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [34:0] _a_opcodes_set_T_1 = {31'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_2377 ? _a_opcodes_set_T_1[11:0] : 12'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [4:0] _a_sizes_set_T = {io_in_a_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :660:77]
wire [35:0] _a_sizes_set_T_1 = {31'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_2377 ? _a_sizes_set_T_1[23:0] : 24'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [2:0] d_clr; // @[Monitor.scala:664:34]
wire [2:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [11:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [23:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_21 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_21; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_21; // @[Monitor.scala:673:46, :783:46]
wire _T_2423 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [3:0] _GEN_22 = 4'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [3:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_22; // @[OneHot.scala:58:35]
wire [3:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_22; // @[OneHot.scala:58:35]
wire [3:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_22; // @[OneHot.scala:58:35]
wire [3:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_22; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_2423 & ~d_release_ack ? _d_clr_wo_ready_T[2:0] : 3'h0; // @[OneHot.scala:58:35]
wire _T_2392 = _T_2525 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_2392 ? _d_clr_T[2:0] : 3'h0; // @[OneHot.scala:58:35]
wire [46:0] _d_opcodes_clr_T_5 = 47'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_2392 ? _d_opcodes_clr_T_5[11:0] : 12'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [46:0] _d_sizes_clr_T_5 = 47'hFF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_2392 ? _d_sizes_clr_T_5[23:0] : 24'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [2:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [2:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [2:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [11:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [11:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [11:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [23:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [23:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [23:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [2:0] inflight_1; // @[Monitor.scala:726:35]
reg [11:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
reg [23:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [11:0] _c_first_beats1_decode_T_4 = _c_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _c_first_beats1_decode_T_5 = ~_c_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [8:0] c_first_beats1_decode_1 = _c_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46]
wire [8:0] c_first_beats1_1 = c_first_beats1_opdata_1 ? c_first_beats1_decode_1 : 9'h0; // @[Edges.scala:102:36, :220:59, :221:14]
reg [8:0] c_first_counter_1; // @[Edges.scala:229:27]
wire [9:0] _c_first_counter1_T_1 = {1'h0, c_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] c_first_counter1_1 = _c_first_counter1_T_1[8:0]; // @[Edges.scala:230:28]
wire c_first_1 = c_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _c_first_last_T_2 = c_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _c_first_last_T_3 = c_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire c_first_last_1 = _c_first_last_T_2 | _c_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire c_first_done_1 = c_first_last_1 & _c_first_T_1; // @[Decoupled.scala:51:35]
wire [8:0] _c_first_count_T_1 = ~c_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [8:0] c_first_count_1 = c_first_beats1_1 & _c_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _c_first_counter_T_1 = c_first_1 ? c_first_beats1_1 : c_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [11:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[11:3]; // @[package.scala:243:46]
wire [8:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1_2 = _d_first_counter1_T_2[8:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [2:0] c_set; // @[Monitor.scala:738:34]
wire [2:0] c_set_wo_ready; // @[Monitor.scala:739:34]
wire [11:0] c_opcodes_set; // @[Monitor.scala:740:34]
wire [23:0] c_sizes_set; // @[Monitor.scala:741:34]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [7:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [11:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [15:0] _c_opcode_lookup_T_6 = {4'h0, _c_opcode_lookup_T_1 & 12'hF}; // @[Monitor.scala:749:{44,97}]
wire [15:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[15:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [23:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [23:0] _c_size_lookup_T_6 = {16'h0, _c_size_lookup_T_1[7:0]}; // @[Monitor.scala:750:{42,93}]
wire [23:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[23:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[7:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [3:0] c_opcodes_set_interm; // @[Monitor.scala:754:40]
wire [4:0] c_sizes_set_interm; // @[Monitor.scala:755:40]
wire _same_cycle_resp_T_3 = io_in_c_valid_0 & c_first_1; // @[Monitor.scala:36:7, :759:26, :795:44]
wire _same_cycle_resp_T_4 = io_in_c_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _same_cycle_resp_T_5 = io_in_c_bits_opcode_0[1]; // @[Monitor.scala:36:7]
wire [3:0] _GEN_23 = 4'h1 << io_in_c_bits_source_0; // @[OneHot.scala:58:35]
wire [3:0] _c_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _c_set_wo_ready_T = _GEN_23; // @[OneHot.scala:58:35]
wire [3:0] _c_set_T; // @[OneHot.scala:58:35]
assign _c_set_T = _GEN_23; // @[OneHot.scala:58:35]
assign c_set_wo_ready = _same_cycle_resp_T_3 & _same_cycle_resp_T_4 & _same_cycle_resp_T_5 ? _c_set_wo_ready_T[2:0] : 3'h0; // @[OneHot.scala:58:35]
wire _T_2464 = _T_2522 & c_first_1 & _same_cycle_resp_T_4 & _same_cycle_resp_T_5; // @[Decoupled.scala:51:35]
assign c_set = _T_2464 ? _c_set_T[2:0] : 3'h0; // @[OneHot.scala:58:35]
wire [3:0] _c_opcodes_set_interm_T = {io_in_c_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :765:53]
wire [3:0] _c_opcodes_set_interm_T_1 = {_c_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:765:{53,61}]
assign c_opcodes_set_interm = _T_2464 ? _c_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:754:40, :763:{25,36,70}, :765:{28,61}]
wire [4:0] _c_sizes_set_interm_T = {io_in_c_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :766:51]
wire [4:0] _c_sizes_set_interm_T_1 = {_c_sizes_set_interm_T[4:1], 1'h1}; // @[Monitor.scala:766:{51,59}]
assign c_sizes_set_interm = _T_2464 ? _c_sizes_set_interm_T_1 : 5'h0; // @[Monitor.scala:755:40, :763:{25,36,70}, :766:{28,59}]
wire [4:0] _c_opcodes_set_T = {1'h0, io_in_c_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :767:79]
wire [34:0] _c_opcodes_set_T_1 = {31'h0, c_opcodes_set_interm} << _c_opcodes_set_T; // @[Monitor.scala:659:54, :754:40, :767:{54,79}]
assign c_opcodes_set = _T_2464 ? _c_opcodes_set_T_1[11:0] : 12'h0; // @[Monitor.scala:740:34, :763:{25,36,70}, :767:{28,54}]
wire [4:0] _c_sizes_set_T = {io_in_c_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :768:77]
wire [35:0] _c_sizes_set_T_1 = {31'h0, c_sizes_set_interm} << _c_sizes_set_T; // @[Monitor.scala:659:54, :755:40, :768:{52,77}]
assign c_sizes_set = _T_2464 ? _c_sizes_set_T_1[23:0] : 24'h0; // @[Monitor.scala:741:34, :763:{25,36,70}, :768:{28,52}]
wire _c_probe_ack_T = io_in_c_bits_opcode_0 == 3'h4; // @[Monitor.scala:36:7, :772:47]
wire _c_probe_ack_T_1 = io_in_c_bits_opcode_0 == 3'h5; // @[Monitor.scala:36:7, :772:95]
wire c_probe_ack = _c_probe_ack_T | _c_probe_ack_T_1; // @[Monitor.scala:772:{47,71,95}]
wire [2:0] d_clr_1; // @[Monitor.scala:774:34]
wire [2:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [11:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [23:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_2495 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_2495 & d_release_ack_1 ? _d_clr_wo_ready_T_1[2:0] : 3'h0; // @[OneHot.scala:58:35]
wire _T_2477 = _T_2525 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_2477 ? _d_clr_T_1[2:0] : 3'h0; // @[OneHot.scala:58:35]
wire [46:0] _d_opcodes_clr_T_11 = 47'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_2477 ? _d_opcodes_clr_T_11[11:0] : 12'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [46:0] _d_sizes_clr_T_11 = 47'hFF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_2477 ? _d_sizes_clr_T_11[23:0] : 24'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_6 = _same_cycle_resp_T_4 & _same_cycle_resp_T_5; // @[Edges.scala:68:{36,40,51}]
wire _same_cycle_resp_T_7 = _same_cycle_resp_T_3 & _same_cycle_resp_T_6; // @[Monitor.scala:795:{44,55}]
wire _same_cycle_resp_T_8 = io_in_c_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :795:113]
wire same_cycle_resp_1 = _same_cycle_resp_T_7 & _same_cycle_resp_T_8; // @[Monitor.scala:795:{55,88,113}]
wire [2:0] _inflight_T_3 = inflight_1 | c_set; // @[Monitor.scala:726:35, :738:34, :814:35]
wire [2:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [2:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [11:0] _inflight_opcodes_T_3 = inflight_opcodes_1 | c_opcodes_set; // @[Monitor.scala:727:35, :740:34, :815:43]
wire [11:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [11:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [23:0] _inflight_sizes_T_3 = inflight_sizes_1 | c_sizes_set; // @[Monitor.scala:728:35, :741:34, :816:41]
wire [23:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [23:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
wire [32:0] _watchdog_T_2 = {1'h0, watchdog_1} + 33'h1; // @[Monitor.scala:818:27, :823:26]
wire [31:0] _watchdog_T_3 = _watchdog_T_2[31:0]; // @[Monitor.scala:823:26]
reg [7:0] inflight_2; // @[Monitor.scala:828:27]
wire [11:0] _d_first_beats1_decode_T_10 = _d_first_beats1_decode_T_9[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_11 = ~_d_first_beats1_decode_T_10; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode_3 = _d_first_beats1_decode_T_11[11:3]; // @[package.scala:243:46]
wire [8:0] d_first_beats1_3 = d_first_beats1_opdata_3 ? d_first_beats1_decode_3 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter_3; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T_3 = {1'h0, d_first_counter_3} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1_3 = _d_first_counter1_T_3[8:0]; // @[Edges.scala:230:28]
wire d_first_3 = d_first_counter_3 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_6 = d_first_counter_3 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_7 = d_first_beats1_3 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_3 = _d_first_last_T_6 | _d_first_last_T_7; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_3 = d_first_last_3 & _d_first_T_3; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T_3 = ~d_first_counter1_3; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count_3 = d_first_beats1_3 & _d_first_count_T_3; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T_3 = d_first_3 ? d_first_beats1_3 : d_first_counter1_3; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
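// Editorial note: d_set / e_clr below track outstanding Grant -> GrantAck handshakes, indexed by sink ID.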
wire [7:0] d_set; // @[Monitor.scala:833:25]
wire _T_2531 = _T_2525 & d_first_3 & io_in_d_bits_opcode_0[2] & ~(io_in_d_bits_opcode_0[1]); // @[Decoupled.scala:51:35]
wire [7:0] _GEN_24 = {5'h0, io_in_d_bits_sink_0}; // @[OneHot.scala:58:35]
wire [7:0] _d_set_T = 8'h1 << _GEN_24; // @[OneHot.scala:58:35]
assign d_set = _T_2531 ? _d_set_T : 8'h0; // @[OneHot.scala:58:35]
wire [7:0] e_clr; // @[Monitor.scala:839:25]
wire _T_2540 = io_in_e_ready_0 & io_in_e_valid_0; // @[Decoupled.scala:51:35]
wire [7:0] _GEN_25 = {5'h0, io_in_e_bits_sink_0}; // @[OneHot.scala:58:35]
wire [7:0] _e_clr_T = 8'h1 << _GEN_25; // @[OneHot.scala:58:35]
assign e_clr = _T_2540 ? _e_clr_T : 8'h0; // @[OneHot.scala:58:35] |
Generate the Verilog code corresponding to the following Chisel files.
File RecFNToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import consts._
class
RecFNToRecFN(
inExpWidth: Int, inSigWidth: Int, outExpWidth: Int, outSigWidth: Int)
extends chisel3.RawModule
{
val io = IO(new Bundle {
val in = Input(Bits((inExpWidth + inSigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
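// Decode the recoded input into a RawFloat view (sign, exponent, significand).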
//------------------------------------------------------------------------
val rawIn = rawFloatFromRecFN(inExpWidth, inSigWidth, io.in);
if ((inExpWidth == outExpWidth) && (inSigWidth <= outSigWidth)) {
//--------------------------------------------------------------------
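// inExpWidth == outExpWidth and inSigWidth <= outSigWidth: no rounding is
// needed, so the input passes straight through (left-shifted only to widen
// the significand) and only a signaling NaN raises an exception flag.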
//--------------------------------------------------------------------
io.out := io.in<<(outSigWidth - inSigWidth)
io.exceptionFlags := isSigNaNRawFloat(rawIn) ## 0.U(4.W)
} else {
//--------------------------------------------------------------------
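// Otherwise the conversion can change the value, so round (and flag
// overflow/underflow/inexact) through RoundAnyRawFNToRecFN.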
//--------------------------------------------------------------------
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
inExpWidth,
inSigWidth,
outExpWidth,
outSigWidth,
flRoundOpt_sigMSBitAlwaysZero
))
roundAnyRawFNToRecFN.io.invalidExc := isSigNaNRawFloat(rawIn)
roundAnyRawFNToRecFN.io.infiniteExc := false.B
roundAnyRawFNToRecFN.io.in := rawIn
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
}
File rawFloatFromRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
/*----------------------------------------------------------------------------
| In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be
| set.
*----------------------------------------------------------------------------*/
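/*----------------------------------------------------------------------------
| Editorial worked example (not part of the upstream source): with
| expWidth = 8 and sigWidth = 24 (a 33-bit recoded single), apply() reduces to
|     exp       = in(31, 23)            -- 9-bit recoded exponent
|     isZero    = exp(8, 6) === 0.U
|     isSpecial = exp(8, 7) === 3.U     -- NaN or infinity
|     isNaN     = isSpecial && exp(6)
|     isInf     = isSpecial && !exp(6)
|     sig       = 0.U ## !isZero ## in(22, 0)
| which matches the field extractions in the generated module below.
*----------------------------------------------------------------------------*/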
object rawFloatFromRecFN
{
def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat =
{
val exp = in(expWidth + sigWidth - 1, sigWidth - 1)
val isZero = exp(expWidth, expWidth - 2) === 0.U
val isSpecial = exp(expWidth, expWidth - 1) === 3.U
val out = Wire(new RawFloat(expWidth, sigWidth))
out.isNaN := isSpecial && exp(expWidth - 2)
out.isInf := isSpecial && ! exp(expWidth - 2)
out.isZero := isZero
out.sign := in(expWidth + sigWidth)
out.sExp := exp.zext
out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0)
out
}
}
| module RecFNToRecFN_56( // @[RecFNToRecFN.scala:44:5]
input [32:0] io_in, // @[RecFNToRecFN.scala:48:16]
output [32:0] io_out // @[RecFNToRecFN.scala:48:16]
);
wire [32:0] io_in_0 = io_in; // @[RecFNToRecFN.scala:44:5]
wire io_detectTininess = 1'h1; // @[RecFNToRecFN.scala:44:5, :48:16]
wire [2:0] io_roundingMode = 3'h0; // @[RecFNToRecFN.scala:44:5, :48:16]
wire [32:0] _io_out_T = io_in_0; // @[RecFNToRecFN.scala:44:5, :64:35]
wire [4:0] _io_exceptionFlags_T_3; // @[RecFNToRecFN.scala:65:54]
wire [32:0] io_out_0; // @[RecFNToRecFN.scala:44:5]
wire [4:0] io_exceptionFlags; // @[RecFNToRecFN.scala:44:5]
wire [8:0] rawIn_exp = io_in_0[31:23]; // @[rawFloatFromRecFN.scala:51:21]
wire [2:0] _rawIn_isZero_T = rawIn_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28]
wire rawIn_isZero = _rawIn_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}]
wire rawIn_isZero_0 = rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :55:23]
wire [1:0] _rawIn_isSpecial_T = rawIn_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28]
wire rawIn_isSpecial = &_rawIn_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}]
wire _rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33]
wire _rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33]
wire _rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:59:25]
wire [9:0] _rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27]
wire [24:0] _rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44]
wire rawIn_isNaN; // @[rawFloatFromRecFN.scala:55:23]
wire rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire rawIn_sign; // @[rawFloatFromRecFN.scala:55:23]
wire [9:0] rawIn_sExp; // @[rawFloatFromRecFN.scala:55:23]
wire [24:0] rawIn_sig; // @[rawFloatFromRecFN.scala:55:23]
wire _rawIn_out_isNaN_T = rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41]
wire _rawIn_out_isInf_T = rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41]
assign _rawIn_out_isNaN_T_1 = rawIn_isSpecial & _rawIn_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}]
assign rawIn_isNaN = _rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33]
wire _rawIn_out_isInf_T_1 = ~_rawIn_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}]
assign _rawIn_out_isInf_T_2 = rawIn_isSpecial & _rawIn_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}]
assign rawIn_isInf = _rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33]
assign _rawIn_out_sign_T = io_in_0[32]; // @[rawFloatFromRecFN.scala:59:25]
assign rawIn_sign = _rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25]
assign _rawIn_out_sExp_T = {1'h0, rawIn_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27]
assign rawIn_sExp = _rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire _rawIn_out_sig_T = ~rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :61:35]
wire [1:0] _rawIn_out_sig_T_1 = {1'h0, _rawIn_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}]
wire [22:0] _rawIn_out_sig_T_2 = io_in_0[22:0]; // @[rawFloatFromRecFN.scala:61:49]
assign _rawIn_out_sig_T_3 = {_rawIn_out_sig_T_1, _rawIn_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}]
assign rawIn_sig = _rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44]
assign io_out_0 = _io_out_T; // @[RecFNToRecFN.scala:44:5, :64:35]
wire _io_exceptionFlags_T = rawIn_sig[22]; // @[rawFloatFromRecFN.scala:55:23]
wire _io_exceptionFlags_T_1 = ~_io_exceptionFlags_T; // @[common.scala:82:{49,56}]
wire _io_exceptionFlags_T_2 = rawIn_isNaN & _io_exceptionFlags_T_1; // @[rawFloatFromRecFN.scala:55:23]
assign _io_exceptionFlags_T_3 = {_io_exceptionFlags_T_2, 4'h0}; // @[common.scala:82:46]
assign io_exceptionFlags = _io_exceptionFlags_T_3; // @[RecFNToRecFN.scala:44:5, :65:54]
assign io_out = io_out_0; // @[RecFNToRecFN.scala:44:5]
endmodule |
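The module above is the degenerate case in which the recoded operand is only classified, not rounded: rawFloatFromRecFN derives isZero and isSpecial from the top three exponent bits, and the exception flags reduce to the signaling-NaN test. Below is a minimal sketch of the same decode at the Chisel level; RecFNClassifier is hypothetical (not part of hardfloat) and assumes it compiles where hardfloat's RawFloat and rawFloatFromRecFN are in scope.
import chisel3._

class RecFNClassifier(expWidth: Int = 8, sigWidth: Int = 24) extends Module {
  val io = IO(new Bundle {
    val in     = Input(Bits((expWidth + sigWidth + 1).W)) // 33-bit recoded single for the defaults
    val isNaN  = Output(Bool())
    val isInf  = Output(Bool())
    val isZero = Output(Bool())
  })
  val raw = rawFloatFromRecFN(expWidth, sigWidth, io.in)
  io.isNaN  := raw.isNaN  // exp(expWidth, expWidth-1) === 3 with exp(expWidth-2) set
  io.isInf  := raw.isInf  // exp(expWidth, expWidth-1) === 3 with exp(expWidth-2) clear
  io.isZero := raw.isZero // exp(expWidth, expWidth-2) === 0
}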
Generate the Verilog code corresponding to the following Chisel files.
File RecFNToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import consts._
class
RecFNToRecFN(
inExpWidth: Int, inSigWidth: Int, outExpWidth: Int, outSigWidth: Int)
extends chisel3.RawModule
{
val io = IO(new Bundle {
val in = Input(Bits((inExpWidth + inSigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val rawIn = rawFloatFromRecFN(inExpWidth, inSigWidth, io.in);
if ((inExpWidth == outExpWidth) && (inSigWidth <= outSigWidth)) {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
io.out := io.in<<(outSigWidth - inSigWidth)
io.exceptionFlags := isSigNaNRawFloat(rawIn) ## 0.U(4.W)
} else {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
inExpWidth,
inSigWidth,
outExpWidth,
outSigWidth,
flRoundOpt_sigMSBitAlwaysZero
))
roundAnyRawFNToRecFN.io.invalidExc := isSigNaNRawFloat(rawIn)
roundAnyRawFNToRecFN.io.infiniteExc := false.B
roundAnyRawFNToRecFN.io.in := rawIn
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
}
File rawFloatFromRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
/*----------------------------------------------------------------------------
| In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be
| set.
*----------------------------------------------------------------------------*/
object rawFloatFromRecFN
{
def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat =
{
val exp = in(expWidth + sigWidth - 1, sigWidth - 1)
val isZero = exp(expWidth, expWidth - 2) === 0.U
val isSpecial = exp(expWidth, expWidth - 1) === 3.U
val out = Wire(new RawFloat(expWidth, sigWidth))
out.isNaN := isSpecial && exp(expWidth - 2)
out.isInf := isSpecial && ! exp(expWidth - 2)
out.isZero := isZero
out.sign := in(expWidth + sigWidth)
out.sExp := exp.zext
out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0)
out
}
}
| module RecFNToRecFN_218( // @[RecFNToRecFN.scala:44:5]
input [32:0] io_in, // @[RecFNToRecFN.scala:48:16]
output [32:0] io_out // @[RecFNToRecFN.scala:48:16]
);
wire [32:0] io_in_0 = io_in; // @[RecFNToRecFN.scala:44:5]
wire io_detectTininess = 1'h1; // @[RecFNToRecFN.scala:44:5, :48:16]
wire [2:0] io_roundingMode = 3'h0; // @[RecFNToRecFN.scala:44:5, :48:16]
wire [32:0] _io_out_T = io_in_0; // @[RecFNToRecFN.scala:44:5, :64:35]
wire [4:0] _io_exceptionFlags_T_3; // @[RecFNToRecFN.scala:65:54]
wire [32:0] io_out_0; // @[RecFNToRecFN.scala:44:5]
wire [4:0] io_exceptionFlags; // @[RecFNToRecFN.scala:44:5]
wire [8:0] rawIn_exp = io_in_0[31:23]; // @[rawFloatFromRecFN.scala:51:21]
wire [2:0] _rawIn_isZero_T = rawIn_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28]
wire rawIn_isZero = _rawIn_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}]
wire rawIn_isZero_0 = rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :55:23]
wire [1:0] _rawIn_isSpecial_T = rawIn_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28]
wire rawIn_isSpecial = &_rawIn_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}]
wire _rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33]
wire _rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33]
wire _rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:59:25]
wire [9:0] _rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27]
wire [24:0] _rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44]
wire rawIn_isNaN; // @[rawFloatFromRecFN.scala:55:23]
wire rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire rawIn_sign; // @[rawFloatFromRecFN.scala:55:23]
wire [9:0] rawIn_sExp; // @[rawFloatFromRecFN.scala:55:23]
wire [24:0] rawIn_sig; // @[rawFloatFromRecFN.scala:55:23]
wire _rawIn_out_isNaN_T = rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41]
wire _rawIn_out_isInf_T = rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41]
assign _rawIn_out_isNaN_T_1 = rawIn_isSpecial & _rawIn_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}]
assign rawIn_isNaN = _rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33]
wire _rawIn_out_isInf_T_1 = ~_rawIn_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}]
assign _rawIn_out_isInf_T_2 = rawIn_isSpecial & _rawIn_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}]
assign rawIn_isInf = _rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33]
assign _rawIn_out_sign_T = io_in_0[32]; // @[rawFloatFromRecFN.scala:59:25]
assign rawIn_sign = _rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25]
assign _rawIn_out_sExp_T = {1'h0, rawIn_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27]
assign rawIn_sExp = _rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire _rawIn_out_sig_T = ~rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :61:35]
wire [1:0] _rawIn_out_sig_T_1 = {1'h0, _rawIn_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}]
wire [22:0] _rawIn_out_sig_T_2 = io_in_0[22:0]; // @[rawFloatFromRecFN.scala:61:49]
assign _rawIn_out_sig_T_3 = {_rawIn_out_sig_T_1, _rawIn_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}]
assign rawIn_sig = _rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44]
assign io_out_0 = _io_out_T; // @[RecFNToRecFN.scala:44:5, :64:35]
wire _io_exceptionFlags_T = rawIn_sig[22]; // @[rawFloatFromRecFN.scala:55:23]
wire _io_exceptionFlags_T_1 = ~_io_exceptionFlags_T; // @[common.scala:82:{49,56}]
wire _io_exceptionFlags_T_2 = rawIn_isNaN & _io_exceptionFlags_T_1; // @[rawFloatFromRecFN.scala:55:23]
assign _io_exceptionFlags_T_3 = {_io_exceptionFlags_T_2, 4'h0}; // @[common.scala:82:46]
assign io_exceptionFlags = _io_exceptionFlags_T_3; // @[RecFNToRecFN.scala:44:5, :65:54]
assign io_out = io_out_0; // @[RecFNToRecFN.scala:44:5]
endmodule |
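This second instance has the same structure: inExpWidth equals outExpWidth and inSigWidth equals outSigWidth, so elaboration takes the pass-through branch of RecFNToRecFN (io.out := io.in shifted by zero bits) and only the invalid flag for a signaling NaN can ever be raised. A hedged elaboration fragment follows, meant to sit inside some enclosing module; recodedSingle is a hypothetical 33-bit recoded signal, not taken from the source.
val conv = Module(new RecFNToRecFN(8, 24, 8, 24)) // same expWidth/sigWidth on both sides
conv.io.in             := recodedSingle           // hypothetical 33-bit recoded input
conv.io.roundingMode   := 0.U                     // ignored in the pass-through branch
conv.io.detectTininess := 1.U                     // ignored in the pass-through branch
val sameValue  = conv.io.out                      // equals conv.io.in (shift amount is zero)
val sigNaNFlag = conv.io.exceptionFlags(4)        // isSigNaNRawFloat(rawIn)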
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
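// Usage sketch (hypothetical signals): ShiftRegInit(io.d, n = 2, init = false.B,
// name = Some("stage")) builds two registers, stage_1 fed by io.d and stage_0 fed by
// stage_1; stage_0 is returned, giving a two-cycle delayed copy of io.d reset to false.B.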
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
| module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_358( // @[SynchronizerReg.scala:68:19]
input clock, // @[SynchronizerReg.scala:68:19]
input reset, // @[SynchronizerReg.scala:68:19]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:68:19]
wire _sync_2_T = io_d_0; // @[SynchronizerReg.scala:54:22, :68:19]
wire io_q_0; // @[SynchronizerReg.scala:68:19]
reg sync_0; // @[SynchronizerReg.scala:51:87]
assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
reg sync_1; // @[SynchronizerReg.scala:51:87]
reg sync_2; // @[SynchronizerReg.scala:51:87]
always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
if (reset) begin // @[SynchronizerReg.scala:68:19]
sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
end
else begin // @[SynchronizerReg.scala:68:19]
sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
sync_2 <= _sync_2_T; // @[SynchronizerReg.scala:51:87, :54:22]
end
end // always @(posedge, posedge)
assign io_q = io_q_0; // @[SynchronizerReg.scala:68:19]
endmodule |
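The _358 suffix marks this as one of many elaborated copies of the three-deep, reset-to-zero asynchronous-reset synchronizer primitive. A minimal usage sketch follows, assuming freechips.rocketchip.util is available; IrqSync and its port names are hypothetical.
import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

class IrqSync extends Module {
  val io = IO(new Bundle {
    val irqAsync = Input(Bool())  // level signal arriving from another clock domain
    val irqSync  = Output(Bool()) // synchronized into this module's clock domain
  })
  // Three flops per bit with reset value 0: elaborates the d3_i0 primitive shown above.
  io.irqSync := AsyncResetSynchronizerShiftReg(io.irqAsync, sync = 3, init = 0)
}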
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
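// Reading of the predicate above: for every client, either that client does not own the
// source ID in question, or at least one of its visibility address sets contains the
// address. Illustrative (hypothetical) expansion for two clients c0 and c1:
//   (!c0.sourceId.contains(source) || c0.visibility.exists(_.contains(address))) &&
//   (!c1.sourceId.contains(source) || c1.visibility.exists(_.contains(address)))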
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
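// Pattern shared by all legalizeMultibeat* helpers: latch the header fields on the first
// beat of a burst (fire && *_first) and require every later beat (valid && !*_first) to
// present identical opcode/param/size/source/address (or sink/denied for the 'D' channel).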
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address=== address,"'B' channel address changed with multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
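// Summary of the formal contract encoded above for the one tracked (symbolic) source ID:
// my_resp_pend is set on the first accepted 'A' beat and cleared on the first accepted 'D'
// beat, so a new request may not be issued while a response is still pending (monAssert),
// and a response may only arrive, and must match the recorded size and opcode, if a
// request was accepted earlier or in the same cycle (assume).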
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
//This is left in for almond which doesn't adhere to the tilelink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
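// Worked example of the inflight bitmap above (hypothetical client with 4 source IDs):
//   cycle 0: 'A' fires with source 2 -> a_set = 0b0100, inflight becomes 0b0100
//   cycle 3: 'D' fires with source 2 -> d_clr = 0b0100, inflight returns to 0b0000
// Reusing source 2 before the response arrives would trip the "'A' channel re-used a
// source ID" assert; the watchdog resets on every A or D fire, so it only expires when
// the link has been genuinely stalled for tilelink_timeout cycles.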
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
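// e.g. size_to_numfullbits(3.U) === "b111".U: a full-ones mask as wide as one per-source
// opcode or size field, used for the lookups and clears below.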
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
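// Illustrative usage sketch (added commentary, not in the original source); the signal
// names are hypothetical. Every term must be true for anything to "fire", and
// fire(exclude) drops exactly one term so it can drive that term's own ready/valid:
//   val helper = DecoupledHelper(in.valid, out.ready, tokenAvailable)
//   in.ready  := helper.fire(in.valid)   // every term except in.valid itself
//   out.valid := helper.fire(out.ready)  // every term except out.ready itself
//   val doWork = helper.fire()           // all terms together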
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
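// Illustrative usage sketch (added commentary, not in the original source); cmd, readCmd
// and writeCmd are hypothetical signals. The whole tuple is selected at once:
//   val (respData, respIsWrite) = MuxTLookup(cmd, (0.U(8.W), false.B), Seq(
//     readCmd  -> (0x10.U(8.W), false.B),
//     writeCmd -> (0x20.U(8.W), true.B)))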
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
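// Illustrative examples (added commentary, not in the original source), useful with printf:
//   Str("OK")  === 0x4f4b.U   // two ASCII bytes, 'O' then 'K'
//   Str('!')   === 0x21.U     // a single ASCII byte
//   Str(count, 16)            // renders a (hypothetical) UInt count as ASCII hex digits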
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
  * initial block and thus accessing it from another initial is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
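// Illustrative usage sketch (added commentary, not in the original source):
//   val maxXacts = PlusArg("max_xacts", default = 4, docstring = "Outstanding transaction cap")
//   // yields 4.U unless the simulation is invoked with +max_xacts=N
//   PlusArg.timeout("sim_timeout", 0, "Kill the simulation after this many cycles")(cycles)
//   // asserts once the (hypothetical) cycles counter reaches the runtime argument; 0 disables it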
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
  /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
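  // Illustrative examples (added commentary, not in the original source):
  //   leftOR("b00100".U(5.W))  === "b11100".U  // 1s smeared toward the MSB
  //   rightOR("b00100".U(5.W)) === "b00111".U  // 1s smeared toward the LSB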
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
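  // Illustrative example (added commentary, not in the original source):
  //   groupByIntoSeq(Seq("apple", "avocado", "banana"))(_.head)
  //   // == Seq('a' -> Seq("apple", "avocado"), 'b' -> Seq("banana"))
  // Keys appear in first-occurrence order, unlike a Map-based groupBy.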
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
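// Illustrative examples (added commentary, not in the original source):
//   IdRange(0, 4).contains(IdRange(1, 3))  // true: [1,3) sits inside [0,4)
//   IdRange(0, 4).overlaps(IdRange(3, 8))  // true: id 3 is claimed by both ranges
//   IdRange(0, 4).contains(sourceId)       // hardware compare on a hypothetical UInt source ID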
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
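// Illustrative examples (added commentary, not in the original source):
//   AddressSet(0x200, 0xff).contains(0x2a0)   // true: one contiguous 256-byte region
//   AddressSet(0x1000, 0xf0f).alignment       // 0x10: a discontiguous set of 16-byte blocks
//   AddressSet(0x1000, 0xfff).subtract(AddressSet(0x1000, 0xff))
//   // removes the first 256-byte block, returning sets that together cover 0x1100-0x1fff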
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
        // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
        // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
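  // Illustrative behavior sketch (added commentary, not in the original source): for a
  // 4-beat burst, on the beats where `fire` is asserted (first, last, done) step through
  // (1,0,0), (0,0,0), (0,0,0), (0,1,1) and `count` advances 0, 1, 2, 3.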
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLMonitor_48( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [27:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire [12:0] _GEN = {10'h0, io_in_a_bits_size}; // @[package.scala:243:71]
wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [6:0] source; // @[Monitor.scala:390:22]
reg [27:0] address; // @[Monitor.scala:391:22]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [6:0] source_1; // @[Monitor.scala:541:22]
reg sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
reg [115:0] inflight; // @[Monitor.scala:614:27]
reg [463:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [463:0] inflight_sizes; // @[Monitor.scala:618:33]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire [127:0] _GEN_0 = {121'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35]
wire _GEN_1 = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35]
wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46]
wire _GEN_2 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74]
wire [127:0] _GEN_3 = {121'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
reg [115:0] inflight_1; // @[Monitor.scala:726:35]
reg [463:0] inflight_sizes_1; // @[Monitor.scala:728:35]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
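// Illustrative usage sketch (not part of the original file): a two-deep, reset-initialized
// delay line whose registers pick up the suggested name prefix "pipe". The module and port
// names below are assumptions made for this example.
class ShiftRegInitExample extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  io.q := ShiftRegInit(io.d, n = 2, init = false.B, name = Some("pipe"))
}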
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
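// Illustrative usage sketch (not part of the original file): a depth-3, asynchronously reset
// pipeline on a 1-bit signal, e.g. as a timing-only retiming chain. Module, port, and
// suggested register names are assumptions made for this example.
class AsyncResetShiftRegExample extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  io.q := AsyncResetShiftReg(io.d, 3, 0, Some("timing_pipe"))
}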
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
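// Illustrative usage sketch (not part of the original file): synchronizing an asynchronous
// 1-bit input into the local clock domain through a 3-deep chain that resets to 0. Module
// and port names are assumptions made for this example.
class AsyncBitSynchronizerExample extends Module {
  val io = IO(new Bundle {
    val async_in = Input(Bool())
    val sync_out = Output(Bool())
  })
  io.sync_out := AsyncResetSynchronizerShiftReg(io.async_in, 3, 0)
}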
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
| module AsyncResetSynchronizerShiftReg_w4_d3_i0_25( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
input [3:0] io_d, // @[ShiftReg.scala:36:14]
output [3:0] io_q // @[ShiftReg.scala:36:14]
);
wire [3:0] io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_2 = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_4 = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_6 = reset; // @[SynchronizerReg.scala:86:21]
wire [3:0] _io_q_T; // @[SynchronizerReg.scala:90:14]
wire [3:0] io_q_0; // @[SynchronizerReg.scala:80:7]
wire _output_T_1 = io_d_0[0]; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire _output_T_3 = io_d_0[1]; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_1; // @[ShiftReg.scala:48:24]
wire _output_T_5 = io_d_0[2]; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_2; // @[ShiftReg.scala:48:24]
wire _output_T_7 = io_d_0[3]; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_3; // @[ShiftReg.scala:48:24]
wire [1:0] io_q_lo = {output_1, output_0}; // @[SynchronizerReg.scala:90:14]
wire [1:0] io_q_hi = {output_3, output_2}; // @[SynchronizerReg.scala:90:14]
assign _io_q_T = {io_q_hi, io_q_lo}; // @[SynchronizerReg.scala:90:14]
assign io_q_0 = _io_q_T; // @[SynchronizerReg.scala:80:7, :90:14]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_245 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_1), // @[SynchronizerReg.scala:87:41]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_246 output_chain_1 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T_2), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_3), // @[SynchronizerReg.scala:87:41]
.io_q (output_1)
); // @[ShiftReg.scala:45:23]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_247 output_chain_2 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T_4), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_5), // @[SynchronizerReg.scala:87:41]
.io_q (output_2)
); // @[ShiftReg.scala:45:23]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_248 output_chain_3 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T_6), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_7), // @[SynchronizerReg.scala:87:41]
.io_q (output_3)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
* @param width Data width of operands
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
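  // In the output-stationary (OS) dataflow below, partial sums accumulate in c1/c2 while b
  // passes through unchanged; in the weight-stationary (WS) dataflow, c1/c2 hold the
  // preloaded weights and the partial sum travels along the b path through mac_unit.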
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds away from 0
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
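// A minimal sketch (illustrative, not part of the original file) of the shape a custom
// instance takes, as the header comment suggests; a real instance must implement every
// ArithmeticOps method. `MyType` and `MyTypeArithmetic` are placeholder names:
//
//   implicit object MyTypeArithmetic extends Arithmetic[MyType] {
//     override implicit def cast(self: MyType) = new ArithmeticOps(self) {
//       override def *(t: MyType) = ...                 // element-wise multiply
//       override def mac(m1: MyType, m2: MyType) = ...  // m1 * m2 + self
//       // ... plus +, -, >>, >, identity, withWidthOf, clippedToWidthOf, relu, zero, minimum
//     }
//   }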
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
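      // Worked example (illustrative, not in the original source): for self = 6 (0b110) and
      // u = 2, the truncated shift gives 1, point_five = self(1) = 1, zeros = 0, and
      // ones_digit = self(2) = 1, so r = 1 and the result is 2: the tie at 6/4 = 1.5 rounds
      // up here because the truncated result's LSB (ones_digit) is set.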
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point square-root unit, but we should use an integer square-root unit instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
// Instantiate the hardfloat square-root unit
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
// Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module MacUnit_45( // @[PE.scala:14:7]
input clock, // @[PE.scala:14:7]
input reset, // @[PE.scala:14:7]
input [7:0] io_in_a, // @[PE.scala:16:14]
input [7:0] io_in_b, // @[PE.scala:16:14]
input [19:0] io_in_c, // @[PE.scala:16:14]
output [19:0] io_out_d // @[PE.scala:16:14]
);
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:14:7]
wire [7:0] io_in_b_0 = io_in_b; // @[PE.scala:14:7]
wire [19:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7]
wire [19:0] _io_out_d_T_3; // @[Arithmetic.scala:93:54]
wire [19:0] io_out_d_0; // @[PE.scala:14:7]
wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7]
wire [20:0] _io_out_d_T_1 = {{5{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[19], io_in_c_0}; // @[PE.scala:14:7]
wire [19:0] _io_out_d_T_2 = _io_out_d_T_1[19:0]; // @[Arithmetic.scala:93:54]
assign _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54]
assign io_out_d_0 = _io_out_d_T_3; // @[PE.scala:14:7]
assign io_out_d = io_out_d_0; // @[PE.scala:14:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File RoundAnyRawFNToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util.Fill
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundAnyRawFNToRecFN(
inExpWidth: Int,
inSigWidth: Int,
outExpWidth: Int,
outSigWidth: Int,
options: Int
)
extends RawModule
{
override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(inExpWidth, inSigWidth))
// (allowed exponent range has limits)
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0)
val effectiveInSigWidth =
if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1
val neverUnderflows =
((options &
(flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact)
) != 0) ||
(inExpWidth < outExpWidth)
val neverOverflows =
((options & flRoundOpt_neverOverflows) != 0) ||
(inExpWidth < outExpWidth)
val outNaNExp = BigInt(7)<<(outExpWidth - 2)
val outInfExp = BigInt(6)<<(outExpWidth - 2)
val outMaxFiniteExp = outInfExp - 1
val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2
val outMinNonzeroExp = outMinNormExp - outSigWidth + 1
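    // For example, with outExpWidth = 8 and outSigWidth = 24 (recoded single precision, as in
    // the ie6_is32_oe8_os24 instance below): outNaNExp = 7<<6 = 0x1C0, outInfExp = 6<<6 = 0x180,
    // outMaxFiniteExp = 0x17F, outMinNormExp = (1<<7) + 2 = 0x82, and
    // outMinNonzeroExp = 0x82 - 24 + 1 = 0x6B. The constant 7<<6 = 0x1C0 shows up as 9'h1C0
    // in that generated instance.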
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_near_even = (io.roundingMode === round_near_even)
val roundingMode_minMag = (io.roundingMode === round_minMag)
val roundingMode_min = (io.roundingMode === round_min)
val roundingMode_max = (io.roundingMode === round_max)
val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag)
val roundingMode_odd = (io.roundingMode === round_odd)
val roundMagUp =
(roundingMode_min && io.in.sign) || (roundingMode_max && ! io.in.sign)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sAdjustedExp =
if (inExpWidth < outExpWidth)
(io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
)(outExpWidth, 0).zext
else if (inExpWidth == outExpWidth)
io.in.sExp
else
io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
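    // For example, with inExpWidth = 6 and outExpWidth = 8 the rebias constant is
    // (1<<8) - (1<<6) = 192 = 0xC0, which appears as the 10'hC0 addend in the generated
    // ie6_is32_oe8_os24 Verilog below.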
val adjustedSig =
if (inSigWidth <= outSigWidth + 2)
io.in.sig<<(outSigWidth - inSigWidth + 2)
else
(io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ##
io.in.sig(inSigWidth - outSigWidth - 2, 0).orR
)
val doShiftSigDown1 =
if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2)
val common_expOut = Wire(UInt((outExpWidth + 1).W))
val common_fractOut = Wire(UInt((outSigWidth - 1).W))
val common_overflow = Wire(Bool())
val common_totalUnderflow = Wire(Bool())
val common_underflow = Wire(Bool())
val common_inexact = Wire(Bool())
if (
neverOverflows && neverUnderflows
&& (effectiveInSigWidth <= outSigWidth)
) {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1
common_fractOut :=
Mux(doShiftSigDown1,
adjustedSig(outSigWidth + 1, 3),
adjustedSig(outSigWidth, 2)
)
common_overflow := false.B
common_totalUnderflow := false.B
common_underflow := false.B
common_inexact := false.B
} else {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
val roundMask =
if (neverUnderflows)
0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W)
else
(lowMask(
sAdjustedExp(outExpWidth, 0),
outMinNormExp - outSigWidth - 1,
outMinNormExp
) | doShiftSigDown1) ##
3.U(2.W)
val shiftedRoundMask = 0.U(1.W) ## roundMask>>1
val roundPosMask = ~shiftedRoundMask & roundMask
val roundPosBit = (adjustedSig & roundPosMask).orR
val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR
val anyRound = roundPosBit || anyRoundExtra
val roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
roundPosBit) ||
(roundMagUp && anyRound)
val roundedSig: Bits =
Mux(roundIncr,
(((adjustedSig | roundMask)>>2) +& 1.U) &
~Mux(roundingMode_near_even && roundPosBit &&
! anyRoundExtra,
roundMask>>1,
0.U((outSigWidth + 2).W)
),
(adjustedSig & ~roundMask)>>2 |
Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U)
)
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext
common_expOut := sRoundedExp(outExpWidth, 0)
common_fractOut :=
Mux(doShiftSigDown1,
roundedSig(outSigWidth - 1, 1),
roundedSig(outSigWidth - 2, 0)
)
common_overflow :=
(if (neverOverflows) false.B else
//*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?:
(sRoundedExp>>(outExpWidth - 1) >= 3.S))
common_totalUnderflow :=
(if (neverUnderflows) false.B else
//*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?:
(sRoundedExp < outMinNonzeroExp.S))
val unboundedRange_roundPosBit =
Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1))
val unboundedRange_anyRound =
(doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR
val unboundedRange_roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
unboundedRange_roundPosBit) ||
(roundMagUp && unboundedRange_anyRound)
val roundCarry =
Mux(doShiftSigDown1,
roundedSig(outSigWidth + 1),
roundedSig(outSigWidth)
)
common_underflow :=
(if (neverUnderflows) false.B else
common_totalUnderflow ||
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
(anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) &&
Mux(doShiftSigDown1, roundMask(3), roundMask(2)) &&
! ((io.detectTininess === tininess_afterRounding) &&
! Mux(doShiftSigDown1,
roundMask(4),
roundMask(3)
) &&
roundCarry && roundPosBit &&
unboundedRange_roundIncr)))
common_inexact := common_totalUnderflow || anyRound
}
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val isNaNOut = io.invalidExc || io.in.isNaN
val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf
val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero
val overflow = commonCase && common_overflow
val underflow = commonCase && common_underflow
val inexact = overflow || (commonCase && common_inexact)
val overflow_roundMagUp =
roundingMode_near_even || roundingMode_near_maxMag || roundMagUp
val pegMinNonzeroMagOut =
commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd)
val pegMaxFiniteMagOut = overflow && ! overflow_roundMagUp
val notNaN_isInfOut =
notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp)
val signOut = Mux(isNaNOut, false.B, io.in.sign)
val expOut =
(common_expOut &
~Mux(io.in.isZero || common_totalUnderflow,
(BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMinNonzeroMagOut,
~outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMaxFiniteMagOut,
(BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W),
0.U
) &
~Mux(notNaN_isInfOut,
(BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
)) |
Mux(pegMinNonzeroMagOut,
outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) |
Mux(pegMaxFiniteMagOut,
outMaxFiniteExp.U((outExpWidth + 1).W),
0.U
) |
Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) |
Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U)
val fractOut =
Mux(isNaNOut || io.in.isZero || common_totalUnderflow,
Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U),
common_fractOut
) |
Fill(outSigWidth - 1, pegMaxFiniteMagOut)
io.out := signOut ## expOut ## fractOut
io.exceptionFlags :=
io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int)
extends RawModule
{
override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(expWidth, sigWidth + 2))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
expWidth, sigWidth + 2, expWidth, sigWidth, options))
roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc
roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc
roundAnyRawFNToRecFN.io.in := io.in
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
| module RoundAnyRawFNToRecFN_ie6_is32_oe8_os24_8( // @[RoundAnyRawFNToRecFN.scala:48:5]
input io_in_isZero, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_sign, // @[RoundAnyRawFNToRecFN.scala:58:16]
input [7:0] io_in_sExp, // @[RoundAnyRawFNToRecFN.scala:58:16]
input [32:0] io_in_sig, // @[RoundAnyRawFNToRecFN.scala:58:16]
output [32:0] io_out, // @[RoundAnyRawFNToRecFN.scala:58:16]
output [4:0] io_exceptionFlags // @[RoundAnyRawFNToRecFN.scala:58:16]
);
wire io_in_isZero_0 = io_in_isZero; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_sign_0 = io_in_sign; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [7:0] io_in_sExp_0 = io_in_sExp; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [32:0] io_in_sig_0 = io_in_sig; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [24:0] _roundMask_T = 25'h0; // @[RoundAnyRawFNToRecFN.scala:153:36]
wire [8:0] _expOut_T_4 = 9'h194; // @[RoundAnyRawFNToRecFN.scala:258:19]
wire [26:0] roundMask = 27'h3; // @[RoundAnyRawFNToRecFN.scala:153:55]
wire [27:0] _shiftedRoundMask_T = 28'h3; // @[RoundAnyRawFNToRecFN.scala:162:41]
wire [26:0] shiftedRoundMask = 27'h1; // @[RoundAnyRawFNToRecFN.scala:162:53]
wire [26:0] _roundPosMask_T = 27'h7FFFFFE; // @[RoundAnyRawFNToRecFN.scala:163:28]
wire [26:0] roundPosMask = 27'h2; // @[RoundAnyRawFNToRecFN.scala:163:46]
wire [26:0] _roundedSig_T_10 = 27'h7FFFFFC; // @[RoundAnyRawFNToRecFN.scala:180:32]
wire [25:0] _roundedSig_T_6 = 26'h1; // @[RoundAnyRawFNToRecFN.scala:177:35, :181:67]
wire [25:0] _roundedSig_T_14 = 26'h1; // @[RoundAnyRawFNToRecFN.scala:177:35, :181:67]
wire [25:0] _roundedSig_T_15 = 26'h0; // @[RoundAnyRawFNToRecFN.scala:181:24]
wire [8:0] _expOut_T_6 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:257:14, :261:14, :265:14]
wire [8:0] _expOut_T_9 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:257:14, :261:14, :265:14]
wire [8:0] _expOut_T_12 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:257:14, :261:14, :265:14]
wire [8:0] _expOut_T_5 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:257:18]
wire [8:0] _expOut_T_8 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:261:18]
wire [8:0] _expOut_T_11 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:265:18]
wire [8:0] _expOut_T_14 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:269:16]
wire [8:0] _expOut_T_16 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:273:16]
wire [8:0] _expOut_T_18 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:277:16]
wire [8:0] _expOut_T_20 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:278:16]
wire [22:0] _fractOut_T_2 = 23'h0; // @[RoundAnyRawFNToRecFN.scala:281:16, :284:13]
wire [22:0] _fractOut_T_4 = 23'h0; // @[RoundAnyRawFNToRecFN.scala:281:16, :284:13]
wire [1:0] _io_exceptionFlags_T = 2'h0; // @[RoundAnyRawFNToRecFN.scala:288:23]
wire [3:0] _io_exceptionFlags_T_2 = 4'h0; // @[RoundAnyRawFNToRecFN.scala:288:53]
wire io_detectTininess = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire roundingMode_near_even = 1'h1; // @[RoundAnyRawFNToRecFN.scala:90:53]
wire _roundIncr_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:169:38]
wire _unboundedRange_roundIncr_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:207:38]
wire _commonCase_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:237:22]
wire _commonCase_T_1 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:237:36]
wire _commonCase_T_2 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:237:33]
wire _overflow_roundMagUp_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:243:32]
wire overflow_roundMagUp = 1'h1; // @[RoundAnyRawFNToRecFN.scala:243:60]
wire [2:0] io_roundingMode = 3'h0; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :288:41]
wire [2:0] _io_exceptionFlags_T_1 = 3'h0; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :288:41]
wire io_invalidExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_infiniteExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_isNaN = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_isInf = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire roundingMode_minMag = 1'h0; // @[RoundAnyRawFNToRecFN.scala:91:53]
wire roundingMode_min = 1'h0; // @[RoundAnyRawFNToRecFN.scala:92:53]
wire roundingMode_max = 1'h0; // @[RoundAnyRawFNToRecFN.scala:93:53]
wire roundingMode_near_maxMag = 1'h0; // @[RoundAnyRawFNToRecFN.scala:94:53]
wire roundingMode_odd = 1'h0; // @[RoundAnyRawFNToRecFN.scala:95:53]
wire _roundMagUp_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:27]
wire _roundMagUp_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:63]
wire roundMagUp = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:42]
wire common_overflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:124:37]
wire common_totalUnderflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:125:37]
wire common_underflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:126:37]
wire _roundIncr_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:171:29]
wire _roundedSig_T_13 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:181:42]
wire _unboundedRange_anyRound_T_1 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:205:30]
wire _unboundedRange_roundIncr_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:209:29]
wire isNaNOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:235:34]
wire notNaN_isSpecialInfOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:236:49]
wire overflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:238:32]
wire underflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:239:32]
wire _pegMinNonzeroMagOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:20]
wire _pegMinNonzeroMagOut_T_1 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:60]
wire pegMinNonzeroMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:45]
wire _pegMaxFiniteMagOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:246:42]
wire pegMaxFiniteMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:246:39]
wire _notNaN_isInfOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:248:45]
wire notNaN_isInfOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:248:32]
wire _expOut_T = io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :253:32]
wire _fractOut_T = io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :280:22]
wire signOut = io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :250:22]
wire [32:0] _io_out_T_1; // @[RoundAnyRawFNToRecFN.scala:286:33]
wire [4:0] _io_exceptionFlags_T_3; // @[RoundAnyRawFNToRecFN.scala:288:66]
wire [32:0] io_out_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [4:0] io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire _roundMagUp_T_1 = ~io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :98:66]
wire [9:0] _sAdjustedExp_T = {{2{io_in_sExp_0[7]}}, io_in_sExp_0} + 10'hC0; // @[RoundAnyRawFNToRecFN.scala:48:5, :104:25]
wire [8:0] _sAdjustedExp_T_1 = _sAdjustedExp_T[8:0]; // @[RoundAnyRawFNToRecFN.scala:104:25, :106:14]
wire [9:0] sAdjustedExp = {1'h0, _sAdjustedExp_T_1}; // @[RoundAnyRawFNToRecFN.scala:106:{14,31}]
wire [25:0] _adjustedSig_T = io_in_sig_0[32:7]; // @[RoundAnyRawFNToRecFN.scala:48:5, :116:23]
wire [6:0] _adjustedSig_T_1 = io_in_sig_0[6:0]; // @[RoundAnyRawFNToRecFN.scala:48:5, :117:26]
wire _adjustedSig_T_2 = |_adjustedSig_T_1; // @[RoundAnyRawFNToRecFN.scala:117:{26,60}]
wire [26:0] adjustedSig = {_adjustedSig_T, _adjustedSig_T_2}; // @[RoundAnyRawFNToRecFN.scala:116:{23,66}, :117:60]
wire [8:0] _common_expOut_T; // @[RoundAnyRawFNToRecFN.scala:187:37]
wire [8:0] common_expOut; // @[RoundAnyRawFNToRecFN.scala:122:31]
wire [22:0] _common_fractOut_T_2; // @[RoundAnyRawFNToRecFN.scala:189:16]
wire [22:0] common_fractOut; // @[RoundAnyRawFNToRecFN.scala:123:31]
wire _common_inexact_T; // @[RoundAnyRawFNToRecFN.scala:230:49]
wire common_inexact; // @[RoundAnyRawFNToRecFN.scala:127:37]
wire [26:0] _roundPosBit_T = adjustedSig & 27'h2; // @[RoundAnyRawFNToRecFN.scala:116:66, :163:46, :164:40]
wire roundPosBit = |_roundPosBit_T; // @[RoundAnyRawFNToRecFN.scala:164:{40,56}]
wire _roundIncr_T_1 = roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :169:67]
wire _roundedSig_T_3 = roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :175:49]
wire [26:0] _anyRoundExtra_T = adjustedSig & 27'h1; // @[RoundAnyRawFNToRecFN.scala:116:66, :162:53, :165:42]
wire anyRoundExtra = |_anyRoundExtra_T; // @[RoundAnyRawFNToRecFN.scala:165:{42,62}]
wire anyRound = roundPosBit | anyRoundExtra; // @[RoundAnyRawFNToRecFN.scala:164:56, :165:62, :166:36]
assign _common_inexact_T = anyRound; // @[RoundAnyRawFNToRecFN.scala:166:36, :230:49]
wire roundIncr = _roundIncr_T_1; // @[RoundAnyRawFNToRecFN.scala:169:67, :170:31]
wire [26:0] _roundedSig_T = adjustedSig | 27'h3; // @[RoundAnyRawFNToRecFN.scala:116:66, :153:55, :174:32]
wire [24:0] _roundedSig_T_1 = _roundedSig_T[26:2]; // @[RoundAnyRawFNToRecFN.scala:174:{32,44}]
wire [25:0] _roundedSig_T_2 = {1'h0, _roundedSig_T_1} + 26'h1; // @[RoundAnyRawFNToRecFN.scala:174:{44,49}, :177:35, :181:67]
wire _roundedSig_T_4 = ~anyRoundExtra; // @[RoundAnyRawFNToRecFN.scala:165:62, :176:30]
wire _roundedSig_T_5 = _roundedSig_T_3 & _roundedSig_T_4; // @[RoundAnyRawFNToRecFN.scala:175:{49,64}, :176:30]
wire [25:0] _roundedSig_T_7 = {25'h0, _roundedSig_T_5}; // @[RoundAnyRawFNToRecFN.scala:175:{25,64}]
wire [25:0] _roundedSig_T_8 = ~_roundedSig_T_7; // @[RoundAnyRawFNToRecFN.scala:175:{21,25}]
wire [25:0] _roundedSig_T_9 = _roundedSig_T_2 & _roundedSig_T_8; // @[RoundAnyRawFNToRecFN.scala:174:{49,57}, :175:21]
wire [26:0] _roundedSig_T_11 = adjustedSig & 27'h7FFFFFC; // @[RoundAnyRawFNToRecFN.scala:116:66, :180:{30,32}]
wire [24:0] _roundedSig_T_12 = _roundedSig_T_11[26:2]; // @[RoundAnyRawFNToRecFN.scala:180:{30,43}]
wire [25:0] _roundedSig_T_16 = {1'h0, _roundedSig_T_12}; // @[RoundAnyRawFNToRecFN.scala:180:{43,47}]
wire [25:0] roundedSig = roundIncr ? _roundedSig_T_9 : _roundedSig_T_16; // @[RoundAnyRawFNToRecFN.scala:170:31, :173:16, :174:57, :180:47]
wire [1:0] _sRoundedExp_T = roundedSig[25:24]; // @[RoundAnyRawFNToRecFN.scala:173:16, :185:54]
wire [2:0] _sRoundedExp_T_1 = {1'h0, _sRoundedExp_T}; // @[RoundAnyRawFNToRecFN.scala:185:{54,76}]
wire [10:0] sRoundedExp = {sAdjustedExp[9], sAdjustedExp} + {{8{_sRoundedExp_T_1[2]}}, _sRoundedExp_T_1}; // @[RoundAnyRawFNToRecFN.scala:106:31, :185:{40,76}]
assign _common_expOut_T = sRoundedExp[8:0]; // @[RoundAnyRawFNToRecFN.scala:185:40, :187:37]
assign common_expOut = _common_expOut_T; // @[RoundAnyRawFNToRecFN.scala:122:31, :187:37]
wire [22:0] _common_fractOut_T = roundedSig[23:1]; // @[RoundAnyRawFNToRecFN.scala:173:16, :190:27]
wire [22:0] _common_fractOut_T_1 = roundedSig[22:0]; // @[RoundAnyRawFNToRecFN.scala:173:16, :191:27]
assign _common_fractOut_T_2 = _common_fractOut_T_1; // @[RoundAnyRawFNToRecFN.scala:189:16, :191:27]
assign common_fractOut = _common_fractOut_T_2; // @[RoundAnyRawFNToRecFN.scala:123:31, :189:16]
wire _unboundedRange_roundPosBit_T = adjustedSig[2]; // @[RoundAnyRawFNToRecFN.scala:116:66, :203:45]
wire _unboundedRange_anyRound_T = adjustedSig[2]; // @[RoundAnyRawFNToRecFN.scala:116:66, :203:45, :205:44]
wire _unboundedRange_roundPosBit_T_1 = adjustedSig[1]; // @[RoundAnyRawFNToRecFN.scala:116:66, :203:61]
wire unboundedRange_roundPosBit = _unboundedRange_roundPosBit_T_1; // @[RoundAnyRawFNToRecFN.scala:203:{16,61}]
wire _unboundedRange_roundIncr_T_1 = unboundedRange_roundPosBit; // @[RoundAnyRawFNToRecFN.scala:203:16, :207:67]
wire [1:0] _unboundedRange_anyRound_T_2 = adjustedSig[1:0]; // @[RoundAnyRawFNToRecFN.scala:116:66, :205:63]
wire _unboundedRange_anyRound_T_3 = |_unboundedRange_anyRound_T_2; // @[RoundAnyRawFNToRecFN.scala:205:{63,70}]
wire unboundedRange_anyRound = _unboundedRange_anyRound_T_3; // @[RoundAnyRawFNToRecFN.scala:205:{49,70}]
wire unboundedRange_roundIncr = _unboundedRange_roundIncr_T_1; // @[RoundAnyRawFNToRecFN.scala:207:67, :208:46]
wire _roundCarry_T = roundedSig[25]; // @[RoundAnyRawFNToRecFN.scala:173:16, :212:27]
wire _roundCarry_T_1 = roundedSig[24]; // @[RoundAnyRawFNToRecFN.scala:173:16, :213:27]
wire roundCarry = _roundCarry_T_1; // @[RoundAnyRawFNToRecFN.scala:211:16, :213:27]
assign common_inexact = _common_inexact_T; // @[RoundAnyRawFNToRecFN.scala:127:37, :230:49]
wire _commonCase_T_3 = ~io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :237:64]
wire commonCase = _commonCase_T_3; // @[RoundAnyRawFNToRecFN.scala:237:{61,64}]
wire _inexact_T = commonCase & common_inexact; // @[RoundAnyRawFNToRecFN.scala:127:37, :237:61, :240:43]
wire inexact = _inexact_T; // @[RoundAnyRawFNToRecFN.scala:240:{28,43}]
wire [8:0] _expOut_T_1 = _expOut_T ? 9'h1C0 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:{18,32}]
wire [8:0] _expOut_T_2 = ~_expOut_T_1; // @[RoundAnyRawFNToRecFN.scala:253:{14,18}]
wire [8:0] _expOut_T_3 = common_expOut & _expOut_T_2; // @[RoundAnyRawFNToRecFN.scala:122:31, :252:24, :253:14]
wire [8:0] _expOut_T_7 = _expOut_T_3; // @[RoundAnyRawFNToRecFN.scala:252:24, :256:17]
wire [8:0] _expOut_T_10 = _expOut_T_7; // @[RoundAnyRawFNToRecFN.scala:256:17, :260:17]
wire [8:0] _expOut_T_13 = _expOut_T_10; // @[RoundAnyRawFNToRecFN.scala:260:17, :264:17]
wire [8:0] _expOut_T_15 = _expOut_T_13; // @[RoundAnyRawFNToRecFN.scala:264:17, :268:18]
wire [8:0] _expOut_T_17 = _expOut_T_15; // @[RoundAnyRawFNToRecFN.scala:268:18, :272:15]
wire [8:0] _expOut_T_19 = _expOut_T_17; // @[RoundAnyRawFNToRecFN.scala:272:15, :276:15]
wire [8:0] expOut = _expOut_T_19; // @[RoundAnyRawFNToRecFN.scala:276:15, :277:73]
wire _fractOut_T_1 = _fractOut_T; // @[RoundAnyRawFNToRecFN.scala:280:{22,38}]
wire [22:0] _fractOut_T_3 = _fractOut_T_1 ? 23'h0 : common_fractOut; // @[RoundAnyRawFNToRecFN.scala:123:31, :280:{12,38}, :281:16, :284:13]
wire [22:0] fractOut = _fractOut_T_3; // @[RoundAnyRawFNToRecFN.scala:280:12, :283:11]
wire [9:0] _io_out_T = {signOut, expOut}; // @[RoundAnyRawFNToRecFN.scala:250:22, :277:73, :286:23]
assign _io_out_T_1 = {_io_out_T, fractOut}; // @[RoundAnyRawFNToRecFN.scala:283:11, :286:{23,33}]
assign io_out_0 = _io_out_T_1; // @[RoundAnyRawFNToRecFN.scala:48:5, :286:33]
assign _io_exceptionFlags_T_3 = {4'h0, inexact}; // @[RoundAnyRawFNToRecFN.scala:240:28, :288:{53,66}]
assign io_exceptionFlags_0 = _io_exceptionFlags_T_3; // @[RoundAnyRawFNToRecFN.scala:48:5, :288:66]
assign io_out = io_out_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
assign io_exceptionFlags = io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
endmodule |
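The 33-bit io_out above is in the Berkeley HardFloat recoded format, which carries one more exponent bit than IEEE 754 single precision (8 + 24 + 1 bits). As an illustrative sketch only (the wrapper module and its port names are invented here; RoundRawFNToRecFN, RawFloat and consts appear in the source above, while fNFromRecFN is assumed to be available from the same hardfloat package), a raw float can be rounded to recoded single precision and converted back to a standard 32-bit IEEE encoding like this:

import chisel3._
import hardfloat._

class RoundToFloatSketch extends Module {
  val io = IO(new Bundle {
    val in           = Input(new RawFloat(8, 26))  // expWidth = 8, sigWidth = 24 + 2
    val roundingMode = Input(UInt(3.W))
    val recOut       = Output(UInt(33.W))           // recoded: 8 + 24 + 1 bits
    val ieeeOut      = Output(UInt(32.W))           // standard IEEE 754 single precision
  })
  val rounder = Module(new RoundRawFNToRecFN(8, 24, 0))
  rounder.io.invalidExc     := false.B
  rounder.io.infiniteExc    := false.B
  rounder.io.in             := io.in
  rounder.io.roundingMode   := io.roundingMode
  rounder.io.detectTininess := consts.tininess_afterRounding
  io.recOut  := rounder.io.out
  io.ieeeOut := fNFromRecFN(8, 24, rounder.io.out)  // recoded -> IEEE encoding
}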
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
    //The monitor doesn't check for acquire T vs acquire B; it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
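    // For example, on an 8-byte (64-bit) data bus, a Get with size = 3 (2^3 = 8 bytes) must have
    // address(2,0) === 0 and mask === 0xFF, while a Get with size = 2 at beat offset 4 must carry
    // mask === 0xF0; anything else trips the alignment or full-mask assertions above.
    // (Illustrative bus width and addresses only.)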
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
      monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
      assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
    monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
      monAssert (a.bits.address=== address,"'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
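  // For example, a 32-byte PutFullData on an 8-byte bus takes 4 beats; the registers above capture
  // opcode/param/size/source/address on the first beat, and the assertions require the remaining
  // 3 beats to repeat them unchanged. (Illustrative sizes only.)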
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
      monAssert (b.bits.address=== address,"'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
    // Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
    monAssert (IfThen(my_resp_pend, !my_a_first_beat),
      "Request message should not be sent with a source ID for which a response message " +
      "is already pending (not received until current cycle) for a prior request message " +
      "with the same source ID" + extra)
    assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
      "Response message should be accepted with a source ID only if a request message with the " +
      "same source ID has been accepted or is being accepted in the current cycle" + extra)
    assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
      "Response message should be sent with a source ID only if a request message with the " +
      "same source ID has been accepted or is being sent in the current cycle" + extra)
    assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
      "If d_valid is 1, then d_size should be the same as a_size of the corresponding request " +
      "message" + extra)
    assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
      "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding " +
      "request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
      monAssert (c.bits.address=== address,"'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
//This is left in for almond which doesn't adhere to the tilelink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
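    // For example, with sizeBits = 4: a_size_bus_size = 5 and log_a_size_bus_size = 3, so every
    // source ID owns an 8-bit slot in inflight_sizes. A size s is recorded as (s << 1) | 1, so a
    // slot value of 0 unambiguously means "nothing outstanding", and
    // size_to_numfullbits(8.U) = 0xFF is the per-slot extract/clear mask. (Illustrative sizeBits.)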
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
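// A minimal usage sketch (hypothetical signals, not part of this file): each side of the handshake
// excludes its own term, so no output depends combinationally on itself.
//   val helper = DecoupledHelper(in.valid, out.ready, meta.valid)
//   out.valid  := helper.fire(out.ready)  // all terms except out.ready
//   in.ready   := helper.fire(in.valid)   // all terms except in.valid
//   meta.ready := helper.fire(meta.valid) // all terms except meta.valid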
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
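// Illustration: Random(6) draws a value in [0, 6) each cycle from the free-running 16-bit LFSR,
// and Random.oneHot(6) returns the same kind of selection as a 6-bit one-hot vector.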
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
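// Illustration: Majority(Seq(a, b, c)) is true when at least two of the three inputs are true,
// formed by OR-ing the AND of every minimal majority-sized subset.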
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
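// Illustration: PopCountAtLeast(x, 2) asks whether x has at least two set bits; for n <= 2 it uses
// the small divide-and-conquer tree above instead of a full PopCount.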
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
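// A minimal usage sketch (hypothetical plusarg names, not part of this file):
//   val maxCycles = PlusArg("max_cycles", docstring = "Stop after this many cycles. Off if 0.")
//   PlusArg.timeout("watchdog_cycles", docstring = "Kill the run when the counter exceeds this.")(cycleCount)
// Running the simulator with +max_cycles=1000 makes maxCycles read back as 1000.U.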
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
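// Illustration: UIntToOH1(2.U, 4) yields b0011 (a thermometer mask of two ones); OH1ToOH of b0011
// recovers the ordinary one-hot b0100, and OH1ToUInt maps it back to the index 2.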
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
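// Illustration: leftOR("b00100".U(5.W)) => b11100 (every bit at or above a set bit becomes 1),
// while rightOR("b00100".U(5.W)) => b00111 (every bit at or below a set bit becomes 1).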
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on inwards path to) the global point of serialization.
* (B)ranch: the agent is on an outwards path from the point of serialization and holds at most a read-only copy.
* (N)one: the agent holds no permissions on the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
// Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
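// Illustration: a client acquiring write permission on a block it does not hold sends an Acquire
// with grow param NtoT; the matching Grant caps it at toT, and a later voluntary Release (or a
// ProbeAck) shrinks or reports, e.g. TtoN when the dirty copy is handed back.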
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return the most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A half-open range of ids [start, end); empty when start == end
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id range start must not exceed end.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
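// Illustration: TransferSizes(4, 64) accepts any power-of-two transfer between 4 and 64 bytes,
// so contains(16) is true while contains(3) (not a power of two) and contains(128) are false.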
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask gives the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
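// A minimal usage sketch (hypothetical device, following the examples above):
//   val dev = AddressSet(0x1000, 0xf0f)   // 16 windows of 16 bytes: 0x1000-0x100f, 0x1100-0x110f, ...
//   dev.contains(BigInt(0x1104))          // true: inside the 0x1100-0x110f window
//   dev.contains(BigInt(0x1010))          // false: bit 4 is a "care" bit and must match the base (0)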
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
        // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
        // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
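  // Illustrative note (not an upstream comment): when the port parameters admit only
  // data-carrying (or only data-free) messages on a channel, staticHasData resolves to a
  // constant, letting hasData() below avoid decoding the opcode at all.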
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
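  // Illustrative note (not an upstream comment): for data-carrying messages numBeats evaluates
  // to max(2^size / beatBytes, 1); messages without a payload always take one beat. numBeats1
  // below is the same quantity minus one, which is the form the beat counters consume.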
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
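  // Illustrative note (not an upstream comment): the counter above tracks the remaining beats
  // of a burst. For a 32-byte PutFullData on an 8-byte port, numBeats1 = 3, so `first` is true
  // on beat 0, `last` and `done` assert on beat 3, and `count` steps through 0, 1, 2, 3 as the
  // beats fire.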
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
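  // Illustrative note (not an upstream comment): `flight` increments on the first beat of every
  // request and decrements on the last beat of every response, so it counts transactions still
  // outstanding on the bundle; the B, C, and E channels are only tracked when the edge supports
  // Acquire/Probe (bce).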
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
module TLMonitor_74( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [4:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [4:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [4:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [31:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [4:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7]
wire _source_ok_T = 1'h0; // @[Parameters.scala:54:10]
wire _source_ok_T_6 = 1'h0; // @[Parameters.scala:54:10]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59]
wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14]
wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27]
wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25]
wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire _source_ok_T_1 = 1'h1; // @[Parameters.scala:54:32]
wire _source_ok_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:54:67]
wire _source_ok_T_7 = 1'h1; // @[Parameters.scala:54:32]
wire _source_ok_T_8 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:54:67]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28]
wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_first_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_first_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_first_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_first_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_set_wo_ready_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_set_wo_ready_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_opcodes_set_interm_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_opcodes_set_interm_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_sizes_set_interm_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_sizes_set_interm_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_opcodes_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_opcodes_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_sizes_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_sizes_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_probe_ack_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_probe_ack_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_probe_ack_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_probe_ack_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _same_cycle_resp_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _same_cycle_resp_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _same_cycle_resp_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _same_cycle_resp_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _same_cycle_resp_WIRE_4_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _same_cycle_resp_WIRE_5_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [4:0] _c_first_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _c_first_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _c_first_WIRE_2_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _c_first_WIRE_3_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _c_set_wo_ready_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _c_set_wo_ready_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _c_set_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _c_set_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _c_opcodes_set_interm_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _c_opcodes_set_interm_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _c_sizes_set_interm_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _c_sizes_set_interm_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _c_opcodes_set_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _c_opcodes_set_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _c_sizes_set_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _c_sizes_set_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _c_probe_ack_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _c_probe_ack_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _c_probe_ack_WIRE_2_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _c_probe_ack_WIRE_3_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _same_cycle_resp_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _same_cycle_resp_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _same_cycle_resp_WIRE_2_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _same_cycle_resp_WIRE_3_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _same_cycle_resp_WIRE_4_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _same_cycle_resp_WIRE_5_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [258:0] _c_opcodes_set_T_1 = 259'h0; // @[Monitor.scala:767:54]
wire [258:0] _c_sizes_set_T_1 = 259'h0; // @[Monitor.scala:768:52]
wire [7:0] _c_opcodes_set_T = 8'h0; // @[Monitor.scala:767:79]
wire [7:0] _c_sizes_set_T = 8'h0; // @[Monitor.scala:768:77]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51]
wire [31:0] _c_set_wo_ready_T = 32'h1; // @[OneHot.scala:58:35]
wire [31:0] _c_set_T = 32'h1; // @[OneHot.scala:58:35]
wire [79:0] c_opcodes_set = 80'h0; // @[Monitor.scala:740:34]
wire [79:0] c_sizes_set = 80'h0; // @[Monitor.scala:741:34]
wire [19:0] c_set = 20'h0; // @[Monitor.scala:738:34]
wire [19:0] c_set_wo_ready = 20'h0; // @[Monitor.scala:739:34]
wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46]
wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76]
wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [4:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_uncommonBits_T_1 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] source_ok_uncommonBits = _source_ok_uncommonBits_T; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_4 = source_ok_uncommonBits < 5'h14; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_5 = _source_ok_T_4; // @[Parameters.scala:56:48, :57:20]
wire _source_ok_WIRE_0 = _source_ok_T_5; // @[Parameters.scala:1138:31]
wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [31:0] _is_aligned_T = {26'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 32'h0; // @[Edges.scala:21:{16,24}]
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [4:0] uncommonBits = _uncommonBits_T; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_1 = _uncommonBits_T_1; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_2 = _uncommonBits_T_2; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_3 = _uncommonBits_T_3; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_4 = _uncommonBits_T_4; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_5 = _uncommonBits_T_5; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_6 = _uncommonBits_T_6; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_7 = _uncommonBits_T_7; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_8 = _uncommonBits_T_8; // @[Parameters.scala:52:{29,56}]
wire [4:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_10 = source_ok_uncommonBits_1 < 5'h14; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_11 = _source_ok_T_10; // @[Parameters.scala:56:48, :57:20]
wire _source_ok_WIRE_1_0 = _source_ok_T_11; // @[Parameters.scala:1138:31]
wire _T_732 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_732; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_732; // @[Decoupled.scala:51:35]
wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [4:0] source; // @[Monitor.scala:390:22]
reg [31:0] address; // @[Monitor.scala:391:22]
wire _T_805 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_805; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_805; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_805; // @[Decoupled.scala:51:35]
wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [2:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [4:0] source_1; // @[Monitor.scala:541:22]
reg sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
reg [19:0] inflight; // @[Monitor.scala:614:27]
reg [79:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [79:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [19:0] a_set; // @[Monitor.scala:626:34]
wire [19:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [79:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [79:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [7:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [7:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [7:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [7:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [7:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [7:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [7:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [7:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [7:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [79:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [79:0] _a_opcode_lookup_T_6 = {76'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [79:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[79:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [79:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [79:0] _a_size_lookup_T_6 = {76'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}]
wire [79:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[79:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [31:0] _GEN_2 = 32'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [31:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35]
wire [31:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[19:0] : 20'h0; // @[OneHot.scala:58:35]
wire _T_658 = _T_732 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_658 ? _a_set_T[19:0] : 20'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_658 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_658 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [7:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [7:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79]
wire [7:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77]
wire [258:0] _a_opcodes_set_T_1 = {255'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_658 ? _a_opcodes_set_T_1[79:0] : 80'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [258:0] _a_sizes_set_T_1 = {255'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_658 ? _a_sizes_set_T_1[79:0] : 80'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [19:0] d_clr; // @[Monitor.scala:664:34]
wire [19:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [79:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [79:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_704 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [31:0] _GEN_5 = 32'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [31:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [31:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [31:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [31:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_704 & ~d_release_ack ? _d_clr_wo_ready_T[19:0] : 20'h0; // @[OneHot.scala:58:35]
wire _T_673 = _T_805 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_673 ? _d_clr_T[19:0] : 20'h0; // @[OneHot.scala:58:35]
wire [270:0] _d_opcodes_clr_T_5 = 271'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_673 ? _d_opcodes_clr_T_5[79:0] : 80'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [270:0] _d_sizes_clr_T_5 = 271'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_673 ? _d_sizes_clr_T_5[79:0] : 80'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [19:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [19:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [19:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [79:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [79:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [79:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [79:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [79:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [79:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [19:0] inflight_1; // @[Monitor.scala:726:35]
wire [19:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [79:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [79:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [79:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [79:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [79:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [79:0] _c_opcode_lookup_T_6 = {76'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [79:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[79:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [79:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [79:0] _c_size_lookup_T_6 = {76'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}]
wire [79:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[79:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [19:0] d_clr_1; // @[Monitor.scala:774:34]
wire [19:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [79:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [79:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_776 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_776 & d_release_ack_1 ? _d_clr_wo_ready_T_1[19:0] : 20'h0; // @[OneHot.scala:58:35]
wire _T_758 = _T_805 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_758 ? _d_clr_T_1[19:0] : 20'h0; // @[OneHot.scala:58:35]
wire [270:0] _d_opcodes_clr_T_11 = 271'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_758 ? _d_opcodes_clr_T_11[79:0] : 80'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [270:0] _d_sizes_clr_T_11 = 271'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_758 ? _d_sizes_clr_T_11[79:0] : 80'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 5'h0; // @[Monitor.scala:36:7, :795:113]
wire [19:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [19:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [79:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [79:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [79:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [79:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
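// Illustrative note (not in the original source): because the shift field above is sized as
// log2Up(accType.getWidth) bits, an accumulator type of, say, SInt(32.W) yields a 5-bit field,
// enough to encode right-shift amounts of 0 through 31.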
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
* @param inputType Data type of the A operand (in_a / out_a)
* @param outputType Data type of the B and D operands and of the PE outputs
* @param accType Accumulator data type; sizes the c1/c2 registers (outside the pure WS dataflow) and the shift amount
* @param df Dataflow supported by this PE (OS, WS, or BOTH)
* @param max_simultaneous_matmuls Maximum number of in-flight matmuls; sizes the in_id/out_id signals
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit object MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds away from 0
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
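// Worked example (illustrative only): for self = 11 (4'b1011) and u = 2, point_five = self(1) = 1,
// zeros = ((self & 1) =/= 0) = 1, and ones_digit = self(2) = 0, so r = 1 & (1 | 0) = 1 and the
// result is (11 >> 2) + 1 = 3, i.e. 11/4 = 2.75 rounded to the nearest integer.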
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
// Instantiate the hardfloat sqrt
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
// Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
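// Illustrative check (not in the original source): for a single-precision Float(8, 24), the
// identity constant above packs 2 zero bits, 7 one bits, and 23 zero bits, i.e. 0x3F800000,
// the IEEE-754 encoding of 1.0; minimum packs a 1 sign bit, an all-ones exponent, and a zero
// significand, i.e. negative infinity.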
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
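// Usage sketch (illustrative only, not part of the original source): with an Arithmetic[T]
// instance in scope, the MAC operation in MacUnit elaborates to a signed multiply-add for SInt:
//   // inside a module taking (implicit ev: Arithmetic[SInt]):
//   import ev._
//   io.out_d := io.in_c.mac(io.in_a, io.in_b)   // out_d = in_a * in_b + in_c
// which matches the sign-extended multiply and add visible in the generated MacUnit Verilog below.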
| module MacUnit_200( // @[PE.scala:14:7]
input clock, // @[PE.scala:14:7]
input reset, // @[PE.scala:14:7]
input [7:0] io_in_a, // @[PE.scala:16:14]
input [7:0] io_in_b, // @[PE.scala:16:14]
input [19:0] io_in_c, // @[PE.scala:16:14]
output [19:0] io_out_d // @[PE.scala:16:14]
);
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:14:7]
wire [7:0] io_in_b_0 = io_in_b; // @[PE.scala:14:7]
wire [19:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7]
wire [19:0] _io_out_d_T_3; // @[Arithmetic.scala:93:54]
wire [19:0] io_out_d_0; // @[PE.scala:14:7]
wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7]
wire [20:0] _io_out_d_T_1 = {{5{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[19], io_in_c_0}; // @[PE.scala:14:7]
wire [19:0] _io_out_d_T_2 = _io_out_d_T_1[19:0]; // @[Arithmetic.scala:93:54]
assign _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54]
assign io_out_d_0 = _io_out_d_T_3; // @[PE.scala:14:7]
assign io_out_d = io_out_d_0; // @[PE.scala:14:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
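// Usage sketch (illustrative only): delaying a Bool by two cycles with a reset value of false.B
// while suggesting a name for the elaborated registers:
//   val v_dly = ShiftRegInit(io.valid, n = 2, init = false.B, name = Some("valid_pipe"))
// This creates two registers, suggested-named valid_pipe_1 (nearest the input) and
// valid_pipe_0 (which drives v_dly).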
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flop chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
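// Usage sketch (illustrative only): synchronizing a single-bit level launched in another clock
// domain through the common 3-deep chain that resets to 0:
//   val level_sync = AsyncResetSynchronizerShiftReg(in = async_level, sync = 3, init = 0)
// (async_level is a hypothetical signal.) This elaborates a module named
// AsyncResetSynchronizerShiftReg_w1_d3_i0, one instance of which appears in the generated
// Verilog further below.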
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
| module AsyncResetSynchronizerShiftReg_w1_d3_i0_187( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
output io_q // @[ShiftReg.scala:36:14]
);
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire io_d = 1'h1; // @[SynchronizerReg.scala:80:7, :87:41]
wire _output_T_1 = 1'h1; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_339 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
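// Example: with a = Seq(true.B, false.B) and b = Seq(false.B, true.B), (a | b) is
// Seq(true.B, true.B) element-wise, (a << 1) prepends a false.B at index 0 (the LSB
// position under asUInt), and a.orR reduces the sequence with ||. Note that & zips
// to the shorter operand while | and ^ pad both operands to the longer length.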
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
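// Example: `x holdUnless en` passes x through combinationally while en is high and
// otherwise replays the last value captured when en was last high; readAndHold below
// uses this to keep SyncReadMem read data stable after the single valid cycle.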
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
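// Worked example: for a 3-bit n (w = 2), n = -1 is "b111", so the value is first
// shifted left by n(1,0) = 3 and then, because the sign bit n(2) is set, shifted
// right by 1 << 2 = 4, for a net right shift of 1.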
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
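// Worked example with n = 6 (not a power of two): 4.U.addWrap(5.U, 6) forms
// z = 4 +& 5 = 9, sees z >= 6, and returns 9 - 6 = 3 in log2Ceil(6) = 3 bits;
// 1.U.subWrap(3.U, 6) sees the borrow bit of 1 -& 3 and returns 1 - 3 + 6 = 4.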
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
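// Worked example: leftOR("b00010100".U) smears each set bit toward the MSB, giving
// "b11111100", while rightOR of the same value smears toward the LSB, giving
// "b00011111".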
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
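// Example: groupByIntoSeq(Seq(1, 2, 3, 4, 5))(_ % 2) returns
// List(1 -> List(1, 3, 5), 0 -> List(2, 4)), with key order fixed by first
// appearance rather than by the unspecified iteration order of a Map.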
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File ALU.scala:
// See LICENSE.SiFive for license details.
// See LICENSE.Berkeley for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util.{BitPat, Fill, Cat, Reverse, PriorityEncoderOH, PopCount, MuxLookup}
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.tile.CoreModule
import freechips.rocketchip.util._
object ALU {
val SZ_ALU_FN = 5
def FN_X = BitPat("b?????")
def FN_ADD = 0.U
def FN_SL = 1.U
def FN_SEQ = 2.U
def FN_SNE = 3.U
def FN_XOR = 4.U
def FN_SR = 5.U
def FN_OR = 6.U
def FN_AND = 7.U
def FN_CZEQZ = 8.U
def FN_CZNEZ = 9.U
def FN_SUB = 10.U
def FN_SRA = 11.U
def FN_SLT = 12.U
def FN_SGE = 13.U
def FN_SLTU = 14.U
def FN_SGEU = 15.U
def FN_UNARY = 16.U
def FN_ROL = 17.U
def FN_ROR = 18.U
def FN_BEXT = 19.U
def FN_ANDN = 24.U
def FN_ORN = 25.U
def FN_XNOR = 26.U
def FN_MAX = 28.U
def FN_MIN = 29.U
def FN_MAXU = 30.U
def FN_MINU = 31.U
def FN_MAXMIN = BitPat("b111??")
// Mul/div reuse some integer FNs
def FN_DIV = FN_XOR
def FN_DIVU = FN_SR
def FN_REM = FN_OR
def FN_REMU = FN_AND
def FN_MUL = FN_ADD
def FN_MULH = FN_SL
def FN_MULHSU = FN_SEQ
def FN_MULHU = FN_SNE
def isMulFN(fn: UInt, cmp: UInt) = fn(1,0) === cmp(1,0)
def isSub(cmd: UInt) = cmd(3)
def isCmp(cmd: UInt) = (cmd >= FN_SLT && cmd <= FN_SGEU)
def isMaxMin(cmd: UInt) = (cmd >= FN_MAX && cmd <= FN_MINU)
def cmpUnsigned(cmd: UInt) = cmd(1)
def cmpInverted(cmd: UInt) = cmd(0)
def cmpEq(cmd: UInt) = !cmd(3)
def shiftReverse(cmd: UInt) = !cmd.isOneOf(FN_SR, FN_SRA, FN_ROR, FN_BEXT)
def bwInvRs2(cmd: UInt) = cmd.isOneOf(FN_ANDN, FN_ORN, FN_XNOR)
}
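// The predicates above decode operation properties directly from the FN_* encoding:
// isSub(FN_SUB) holds because FN_SUB = 10 = "b01010" has bit 3 set, isCmp covers
// FN_SLT (12) through FN_SGEU (15), and for those comparisons bit 1 selects the
// unsigned variant while bit 0 selects the inverted (>=) sense.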
import ALU._
abstract class AbstractALU(implicit p: Parameters) extends CoreModule()(p) {
val io = IO(new Bundle {
val dw = Input(UInt(SZ_DW.W))
val fn = Input(UInt(SZ_ALU_FN.W))
val in2 = Input(UInt(xLen.W))
val in1 = Input(UInt(xLen.W))
val out = Output(UInt(xLen.W))
val adder_out = Output(UInt(xLen.W))
val cmp_out = Output(Bool())
})
}
class ALU(implicit p: Parameters) extends AbstractALU()(p) {
// ADD, SUB
val in2_inv = Mux(isSub(io.fn), ~io.in2, io.in2)
val in1_xor_in2 = io.in1 ^ in2_inv
val in1_and_in2 = io.in1 & in2_inv
io.adder_out := io.in1 + in2_inv + isSub(io.fn)
// SLT, SLTU
val slt =
Mux(io.in1(xLen-1) === io.in2(xLen-1), io.adder_out(xLen-1),
Mux(cmpUnsigned(io.fn), io.in2(xLen-1), io.in1(xLen-1)))
io.cmp_out := cmpInverted(io.fn) ^ Mux(cmpEq(io.fn), in1_xor_in2 === 0.U, slt)
// SLL, SRL, SRA
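// A single right-shifter serves both directions: for left shifts the operand is
// bit-reversed (per shiftReverse above), shifted right, and the result reversed
// back into shout_l.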
val (shamt, shin_r) =
if (xLen == 32) (io.in2(4,0), io.in1)
else {
require(xLen == 64)
val shin_hi_32 = Fill(32, isSub(io.fn) && io.in1(31))
val shin_hi = Mux(io.dw === DW_64, io.in1(63,32), shin_hi_32)
val shamt = Cat(io.in2(5) & (io.dw === DW_64), io.in2(4,0))
(shamt, Cat(shin_hi, io.in1(31,0)))
}
val shin = Mux(shiftReverse(io.fn), Reverse(shin_r), shin_r)
val shout_r = (Cat(isSub(io.fn) & shin(xLen-1), shin).asSInt >> shamt)(xLen-1,0)
val shout_l = Reverse(shout_r)
val shout = Mux(io.fn === FN_SR || io.fn === FN_SRA || io.fn === FN_BEXT, shout_r, 0.U) |
Mux(io.fn === FN_SL, shout_l, 0.U)
// CZEQZ, CZNEZ
val in2_not_zero = io.in2.orR
val cond_out = Option.when(usingConditionalZero)(
Mux((io.fn === FN_CZEQZ && in2_not_zero) || (io.fn === FN_CZNEZ && !in2_not_zero), io.in1, 0.U)
)
// AND, OR, XOR
val logic = Mux(io.fn === FN_XOR || io.fn === FN_OR || io.fn === FN_ORN || io.fn === FN_XNOR, in1_xor_in2, 0.U) |
Mux(io.fn === FN_OR || io.fn === FN_AND || io.fn === FN_ORN || io.fn === FN_ANDN, in1_and_in2, 0.U)
val bext_mask = Mux(coreParams.useZbs.B && io.fn === FN_BEXT, 1.U, ~(0.U(xLen.W)))
val shift_logic = (isCmp (io.fn) && slt) | logic | (shout & bext_mask)
val shift_logic_cond = cond_out match {
case Some(co) => shift_logic | co
case _ => shift_logic
}
// CLZ, CTZ, CPOP
val tz_in = MuxLookup((io.dw === DW_32) ## !io.in2(0), 0.U)(Seq(
0.U -> io.in1,
1.U -> Reverse(io.in1),
2.U -> 1.U ## io.in1(31,0),
3.U -> 1.U ## Reverse(io.in1(31,0))
))
val popc_in = Mux(io.in2(1),
Mux(io.dw === DW_32, io.in1(31,0), io.in1),
PriorityEncoderOH(1.U ## tz_in) - 1.U)(xLen-1,0)
val count = PopCount(popc_in)
val in1_bytes = io.in1.asTypeOf(Vec(xLen / 8, UInt(8.W)))
val orcb = VecInit(in1_bytes.map(b => Fill(8, b =/= 0.U))).asUInt
val rev8 = VecInit(in1_bytes.reverse).asUInt
val unary = MuxLookup(io.in2(11,0), count)(Seq(
0x287.U -> orcb,
(if (xLen == 32) 0x698 else 0x6b8).U -> rev8,
0x080.U -> io.in1(15,0),
0x604.U -> Fill(xLen-8, io.in1(7)) ## io.in1(7,0),
0x605.U -> Fill(xLen-16, io.in1(15)) ## io.in1(15,0)
))
// MAX, MIN, MAXU, MINU
val maxmin_out = Mux(io.cmp_out, io.in2, io.in1)
// ROL, ROR
val rot_shamt = Mux(io.dw === DW_32, 32.U, xLen.U) - shamt
val rotin = Mux(io.fn(0), shin_r, Reverse(shin_r))
val rotout_r = (rotin >> rot_shamt)(xLen-1,0)
val rotout_l = Reverse(rotout_r)
val rotout = Mux(io.fn(0), rotout_r, rotout_l) | Mux(io.fn(0), shout_l, shout_r)
val out = MuxLookup(io.fn, shift_logic_cond)(Seq(
FN_ADD -> io.adder_out,
FN_SUB -> io.adder_out
) ++ (if (coreParams.useZbb) Seq(
FN_UNARY -> unary,
FN_MAX -> maxmin_out,
FN_MIN -> maxmin_out,
FN_MAXU -> maxmin_out,
FN_MINU -> maxmin_out,
FN_ROL -> rotout,
FN_ROR -> rotout,
) else Nil))
io.out := out
if (xLen > 32) {
require(xLen == 64)
when (io.dw === DW_32) { io.out := Cat(Fill(32, out(31)), out(31,0)) }
}
}
| module ALU_4( // @[ALU.scala:83:7]
input clock, // @[ALU.scala:83:7]
input reset, // @[ALU.scala:83:7]
input io_dw, // @[ALU.scala:72:14]
input [4:0] io_fn, // @[ALU.scala:72:14]
input [63:0] io_in2, // @[ALU.scala:72:14]
input [63:0] io_in1, // @[ALU.scala:72:14]
output [63:0] io_out, // @[ALU.scala:72:14]
output [63:0] io_adder_out, // @[ALU.scala:72:14]
output io_cmp_out // @[ALU.scala:72:14]
);
wire [7:0] in1_bytes_6; // @[ALU.scala:140:34]
wire [7:0] in1_bytes_5; // @[ALU.scala:140:34]
wire [7:0] in1_bytes_4; // @[ALU.scala:140:34]
wire [7:0] in1_bytes_3; // @[ALU.scala:140:34]
wire [7:0] in1_bytes_2; // @[ALU.scala:140:34]
wire [7:0] in1_bytes_1; // @[ALU.scala:140:34]
wire [7:0] in1_bytes_0; // @[ALU.scala:140:34]
wire io_dw_0 = io_dw; // @[ALU.scala:83:7]
wire [4:0] io_fn_0 = io_fn; // @[ALU.scala:83:7]
wire [63:0] io_in2_0 = io_in2; // @[ALU.scala:83:7]
wire [63:0] io_in1_0 = io_in1; // @[ALU.scala:83:7]
wire [63:0] _bext_mask_T_2 = 64'hFFFFFFFFFFFFFFFF; // @[ALU.scala:122:70]
wire [31:0] _tz_in_T_67 = 32'hFFFF; // @[ALU.scala:134:26]
wire [31:0] _tz_in_T_66 = 32'hFFFF0000; // @[ALU.scala:134:26]
wire [31:0] _tz_in_T_72 = 32'hFFFF0000; // @[ALU.scala:134:26]
wire [23:0] _tz_in_T_75 = 24'hFFFF; // @[ALU.scala:134:26]
wire [31:0] _tz_in_T_76 = 32'hFFFF00; // @[ALU.scala:134:26]
wire [31:0] _tz_in_T_77 = 32'hFF00FF; // @[ALU.scala:134:26]
wire [31:0] _tz_in_T_82 = 32'hFF00FF00; // @[ALU.scala:134:26]
wire [27:0] _tz_in_T_85 = 28'hFF00FF; // @[ALU.scala:134:26]
wire [31:0] _tz_in_T_86 = 32'hFF00FF0; // @[ALU.scala:134:26]
wire [31:0] _tz_in_T_87 = 32'hF0F0F0F; // @[ALU.scala:134:26]
wire [31:0] _tz_in_T_92 = 32'hF0F0F0F0; // @[ALU.scala:134:26]
wire [29:0] _tz_in_T_95 = 30'hF0F0F0F; // @[ALU.scala:134:26]
wire [31:0] _tz_in_T_96 = 32'h3C3C3C3C; // @[ALU.scala:134:26]
wire [31:0] _tz_in_T_97 = 32'h33333333; // @[ALU.scala:134:26]
wire [31:0] _tz_in_T_102 = 32'hCCCCCCCC; // @[ALU.scala:134:26]
wire [30:0] _tz_in_T_105 = 31'h33333333; // @[ALU.scala:134:26]
wire [31:0] _tz_in_T_106 = 32'h66666666; // @[ALU.scala:134:26]
wire [31:0] _tz_in_T_107 = 32'h55555555; // @[ALU.scala:134:26]
wire [31:0] _tz_in_T_112 = 32'hAAAAAAAA; // @[ALU.scala:134:26]
wire [63:0] _shin_T_9 = 64'hFFFFFFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shout_l_T_1 = 64'hFFFFFFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _tz_in_T_5 = 64'hFFFFFFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotin_T_2 = 64'hFFFFFFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotout_l_T_1 = 64'hFFFFFFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shin_T_8 = 64'hFFFFFFFF00000000; // @[ALU.scala:106:46]
wire [63:0] _shin_T_14 = 64'hFFFFFFFF00000000; // @[ALU.scala:106:46]
wire [63:0] _shout_l_T = 64'hFFFFFFFF00000000; // @[ALU.scala:108:24]
wire [63:0] _shout_l_T_6 = 64'hFFFFFFFF00000000; // @[ALU.scala:108:24]
wire [63:0] _tz_in_T_4 = 64'hFFFFFFFF00000000; // @[ALU.scala:132:19]
wire [63:0] _tz_in_T_10 = 64'hFFFFFFFF00000000; // @[ALU.scala:132:19]
wire [63:0] _rotin_T_1 = 64'hFFFFFFFF00000000; // @[ALU.scala:156:44]
wire [63:0] _rotin_T_7 = 64'hFFFFFFFF00000000; // @[ALU.scala:156:44]
wire [63:0] _rotout_l_T = 64'hFFFFFFFF00000000; // @[ALU.scala:158:25]
wire [63:0] _rotout_l_T_6 = 64'hFFFFFFFF00000000; // @[ALU.scala:158:25]
wire [47:0] _shin_T_17 = 48'hFFFFFFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [47:0] _shout_l_T_9 = 48'hFFFFFFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [47:0] _tz_in_T_13 = 48'hFFFFFFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [47:0] _rotin_T_10 = 48'hFFFFFFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [47:0] _rotout_l_T_9 = 48'hFFFFFFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shin_T_18 = 64'hFFFFFFFF0000; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shout_l_T_10 = 64'hFFFFFFFF0000; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _tz_in_T_14 = 64'hFFFFFFFF0000; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotin_T_11 = 64'hFFFFFFFF0000; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotout_l_T_10 = 64'hFFFFFFFF0000; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shin_T_19 = 64'hFFFF0000FFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shout_l_T_11 = 64'hFFFF0000FFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _tz_in_T_15 = 64'hFFFF0000FFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotin_T_12 = 64'hFFFF0000FFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotout_l_T_11 = 64'hFFFF0000FFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shin_T_24 = 64'hFFFF0000FFFF0000; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shout_l_T_16 = 64'hFFFF0000FFFF0000; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _tz_in_T_20 = 64'hFFFF0000FFFF0000; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotin_T_17 = 64'hFFFF0000FFFF0000; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotout_l_T_16 = 64'hFFFF0000FFFF0000; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [55:0] _shin_T_27 = 56'hFFFF0000FFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [55:0] _shout_l_T_19 = 56'hFFFF0000FFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [55:0] _tz_in_T_23 = 56'hFFFF0000FFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [55:0] _rotin_T_20 = 56'hFFFF0000FFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [55:0] _rotout_l_T_19 = 56'hFFFF0000FFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shin_T_28 = 64'hFFFF0000FFFF00; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shout_l_T_20 = 64'hFFFF0000FFFF00; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _tz_in_T_24 = 64'hFFFF0000FFFF00; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotin_T_21 = 64'hFFFF0000FFFF00; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotout_l_T_20 = 64'hFFFF0000FFFF00; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shin_T_29 = 64'hFF00FF00FF00FF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shout_l_T_21 = 64'hFF00FF00FF00FF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _tz_in_T_25 = 64'hFF00FF00FF00FF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotin_T_22 = 64'hFF00FF00FF00FF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotout_l_T_21 = 64'hFF00FF00FF00FF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shin_T_34 = 64'hFF00FF00FF00FF00; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shout_l_T_26 = 64'hFF00FF00FF00FF00; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _tz_in_T_30 = 64'hFF00FF00FF00FF00; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotin_T_27 = 64'hFF00FF00FF00FF00; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotout_l_T_26 = 64'hFF00FF00FF00FF00; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [59:0] _shin_T_37 = 60'hFF00FF00FF00FF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [59:0] _shout_l_T_29 = 60'hFF00FF00FF00FF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [59:0] _tz_in_T_33 = 60'hFF00FF00FF00FF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [59:0] _rotin_T_30 = 60'hFF00FF00FF00FF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [59:0] _rotout_l_T_29 = 60'hFF00FF00FF00FF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shin_T_38 = 64'hFF00FF00FF00FF0; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shout_l_T_30 = 64'hFF00FF00FF00FF0; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _tz_in_T_34 = 64'hFF00FF00FF00FF0; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotin_T_31 = 64'hFF00FF00FF00FF0; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotout_l_T_30 = 64'hFF00FF00FF00FF0; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shin_T_39 = 64'hF0F0F0F0F0F0F0F; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shout_l_T_31 = 64'hF0F0F0F0F0F0F0F; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _tz_in_T_35 = 64'hF0F0F0F0F0F0F0F; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotin_T_32 = 64'hF0F0F0F0F0F0F0F; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotout_l_T_31 = 64'hF0F0F0F0F0F0F0F; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shin_T_44 = 64'hF0F0F0F0F0F0F0F0; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shout_l_T_36 = 64'hF0F0F0F0F0F0F0F0; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _tz_in_T_40 = 64'hF0F0F0F0F0F0F0F0; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotin_T_37 = 64'hF0F0F0F0F0F0F0F0; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotout_l_T_36 = 64'hF0F0F0F0F0F0F0F0; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [61:0] _shin_T_47 = 62'hF0F0F0F0F0F0F0F; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [61:0] _shout_l_T_39 = 62'hF0F0F0F0F0F0F0F; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [61:0] _tz_in_T_43 = 62'hF0F0F0F0F0F0F0F; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [61:0] _rotin_T_40 = 62'hF0F0F0F0F0F0F0F; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [61:0] _rotout_l_T_39 = 62'hF0F0F0F0F0F0F0F; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shin_T_48 = 64'h3C3C3C3C3C3C3C3C; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shout_l_T_40 = 64'h3C3C3C3C3C3C3C3C; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _tz_in_T_44 = 64'h3C3C3C3C3C3C3C3C; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotin_T_41 = 64'h3C3C3C3C3C3C3C3C; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotout_l_T_40 = 64'h3C3C3C3C3C3C3C3C; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shin_T_49 = 64'h3333333333333333; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shout_l_T_41 = 64'h3333333333333333; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _tz_in_T_45 = 64'h3333333333333333; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotin_T_42 = 64'h3333333333333333; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotout_l_T_41 = 64'h3333333333333333; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shin_T_54 = 64'hCCCCCCCCCCCCCCCC; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shout_l_T_46 = 64'hCCCCCCCCCCCCCCCC; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _tz_in_T_50 = 64'hCCCCCCCCCCCCCCCC; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotin_T_47 = 64'hCCCCCCCCCCCCCCCC; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotout_l_T_46 = 64'hCCCCCCCCCCCCCCCC; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [62:0] _shin_T_57 = 63'h3333333333333333; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [62:0] _shout_l_T_49 = 63'h3333333333333333; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [62:0] _tz_in_T_53 = 63'h3333333333333333; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [62:0] _rotin_T_50 = 63'h3333333333333333; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [62:0] _rotout_l_T_49 = 63'h3333333333333333; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shin_T_58 = 64'h6666666666666666; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shout_l_T_50 = 64'h6666666666666666; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _tz_in_T_54 = 64'h6666666666666666; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotin_T_51 = 64'h6666666666666666; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotout_l_T_50 = 64'h6666666666666666; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shin_T_59 = 64'h5555555555555555; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shout_l_T_51 = 64'h5555555555555555; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _tz_in_T_55 = 64'h5555555555555555; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotin_T_52 = 64'h5555555555555555; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotout_l_T_51 = 64'h5555555555555555; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shin_T_64 = 64'hAAAAAAAAAAAAAAAA; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shout_l_T_56 = 64'hAAAAAAAAAAAAAAAA; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _tz_in_T_60 = 64'hAAAAAAAAAAAAAAAA; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotin_T_57 = 64'hAAAAAAAAAAAAAAAA; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotout_l_T_56 = 64'hAAAAAAAAAAAAAAAA; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire _shin_hi_T = io_dw_0; // @[ALU.scala:83:7, :102:31]
wire _shamt_T_1 = io_dw_0; // @[ALU.scala:83:7, :103:42]
wire [63:0] _in1_bytes_WIRE = io_in1_0; // @[ALU.scala:83:7, :140:34]
wire [63:0] _io_adder_out_T_4; // @[ALU.scala:88:36]
wire _io_cmp_out_T_5; // @[ALU.scala:94:36]
wire [63:0] io_out_0; // @[ALU.scala:83:7]
wire [63:0] io_adder_out_0; // @[ALU.scala:83:7]
wire io_cmp_out_0; // @[ALU.scala:83:7]
wire _in2_inv_T = io_fn_0[3]; // @[ALU.scala:58:29, :83:7]
wire _io_adder_out_T_2 = io_fn_0[3]; // @[ALU.scala:58:29, :83:7]
wire _io_cmp_out_T_1 = io_fn_0[3]; // @[ALU.scala:58:29, :63:30, :83:7]
wire _shin_hi_32_T = io_fn_0[3]; // @[ALU.scala:58:29, :83:7]
wire _shout_r_T = io_fn_0[3]; // @[ALU.scala:58:29, :83:7]
wire [63:0] _in2_inv_T_1 = ~io_in2_0; // @[ALU.scala:83:7, :85:35]
wire [63:0] in2_inv = _in2_inv_T ? _in2_inv_T_1 : io_in2_0; // @[ALU.scala:58:29, :83:7, :85:{20,35}]
wire [63:0] in1_xor_in2 = io_in1_0 ^ in2_inv; // @[ALU.scala:83:7, :85:20, :86:28]
wire [63:0] in1_and_in2 = io_in1_0 & in2_inv; // @[ALU.scala:83:7, :85:20, :87:28]
wire [64:0] _io_adder_out_T = {1'h0, io_in1_0} + {1'h0, in2_inv}; // @[ALU.scala:83:7, :85:20, :88:26]
wire [63:0] _io_adder_out_T_1 = _io_adder_out_T[63:0]; // @[ALU.scala:88:26]
wire [64:0] _io_adder_out_T_3 = {1'h0, _io_adder_out_T_1} + {64'h0, _io_adder_out_T_2}; // @[ALU.scala:58:29, :88:{26,36}]
assign _io_adder_out_T_4 = _io_adder_out_T_3[63:0]; // @[ALU.scala:88:36]
assign io_adder_out_0 = _io_adder_out_T_4; // @[ALU.scala:83:7, :88:36]
wire _slt_T = io_in1_0[63]; // @[ALU.scala:83:7, :92:15]
wire _slt_T_6 = io_in1_0[63]; // @[ALU.scala:83:7, :92:15, :93:51]
wire _slt_T_1 = io_in2_0[63]; // @[ALU.scala:83:7, :92:34]
wire _slt_T_5 = io_in2_0[63]; // @[ALU.scala:83:7, :92:34, :93:35]
wire _slt_T_2 = _slt_T == _slt_T_1; // @[ALU.scala:92:{15,24,34}]
wire _slt_T_3 = io_adder_out_0[63]; // @[ALU.scala:83:7, :92:56]
wire _slt_T_4 = io_fn_0[1]; // @[ALU.scala:61:35, :83:7]
wire _slt_T_7 = _slt_T_4 ? _slt_T_5 : _slt_T_6; // @[ALU.scala:61:35, :93:{8,35,51}]
wire slt = _slt_T_2 ? _slt_T_3 : _slt_T_7; // @[ALU.scala:92:{8,24,56}, :93:8]
wire _io_cmp_out_T = io_fn_0[0]; // @[ALU.scala:62:35, :83:7]
wire _rotin_T = io_fn_0[0]; // @[ALU.scala:62:35, :83:7, :156:24]
wire _rotout_T = io_fn_0[0]; // @[ALU.scala:62:35, :83:7, :159:25]
wire _rotout_T_2 = io_fn_0[0]; // @[ALU.scala:62:35, :83:7, :159:61]
wire _io_cmp_out_T_2 = ~_io_cmp_out_T_1; // @[ALU.scala:63:{26,30}]
wire _io_cmp_out_T_3 = in1_xor_in2 == 64'h0; // @[ALU.scala:86:28, :94:68]
wire _io_cmp_out_T_4 = _io_cmp_out_T_2 ? _io_cmp_out_T_3 : slt; // @[ALU.scala:63:26, :92:8, :94:{41,68}]
assign _io_cmp_out_T_5 = _io_cmp_out_T ^ _io_cmp_out_T_4; // @[ALU.scala:62:35, :94:{36,41}]
assign io_cmp_out_0 = _io_cmp_out_T_5; // @[ALU.scala:83:7, :94:36]
wire _shin_hi_32_T_1 = io_in1_0[31]; // @[ALU.scala:83:7, :101:55]
wire _shin_hi_32_T_2 = _shin_hi_32_T & _shin_hi_32_T_1; // @[ALU.scala:58:29, :101:{46,55}]
wire [31:0] shin_hi_32 = {32{_shin_hi_32_T_2}}; // @[ALU.scala:101:{28,46}]
wire [31:0] _shin_hi_T_1 = io_in1_0[63:32]; // @[ALU.scala:83:7, :102:48]
wire [31:0] _tz_in_T_6 = io_in1_0[63:32]; // @[ALU.scala:83:7, :102:48, :132:19]
wire [31:0] shin_hi = _shin_hi_T ? _shin_hi_T_1 : shin_hi_32; // @[ALU.scala:101:28, :102:{24,31,48}]
wire _shamt_T = io_in2_0[5]; // @[ALU.scala:83:7, :103:29]
wire _shamt_T_2 = _shamt_T & _shamt_T_1; // @[ALU.scala:103:{29,33,42}]
wire [4:0] _shamt_T_3 = io_in2_0[4:0]; // @[ALU.scala:83:7, :103:60]
wire [5:0] shamt = {_shamt_T_2, _shamt_T_3}; // @[ALU.scala:103:{22,33,60}]
wire [31:0] _tz_in_T_8 = io_in1_0[31:0]; // @[ALU.scala:83:7, :104:34, :132:19]
wire [31:0] _tz_in_T_63 = io_in1_0[31:0]; // @[ALU.scala:83:7, :104:34, :133:25]
wire [31:0] _tz_in_T_65 = io_in1_0[31:0]; // @[ALU.scala:83:7, :104:34, :134:33]
wire [31:0] _popc_in_T_2 = io_in1_0[31:0]; // @[ALU.scala:83:7, :104:34, :137:32]
wire [63:0] shin_r = {shin_hi, io_in1_0[31:0]}; // @[ALU.scala:83:7, :102:24, :104:{18,34}]
wire _GEN = io_fn_0 == 5'h5; // @[package.scala:16:47]
wire _shin_T; // @[package.scala:16:47]
assign _shin_T = _GEN; // @[package.scala:16:47]
wire _shout_T; // @[ALU.scala:109:25]
assign _shout_T = _GEN; // @[package.scala:16:47]
wire _GEN_0 = io_fn_0 == 5'hB; // @[package.scala:16:47]
wire _shin_T_1; // @[package.scala:16:47]
assign _shin_T_1 = _GEN_0; // @[package.scala:16:47]
wire _shout_T_1; // @[ALU.scala:109:44]
assign _shout_T_1 = _GEN_0; // @[package.scala:16:47]
wire _GEN_1 = io_fn_0 == 5'h12; // @[package.scala:16:47]
wire _shin_T_2; // @[package.scala:16:47]
assign _shin_T_2 = _GEN_1; // @[package.scala:16:47]
wire _out_T_16; // @[ALU.scala:161:47]
assign _out_T_16 = _GEN_1; // @[package.scala:16:47]
wire _GEN_2 = io_fn_0 == 5'h13; // @[package.scala:16:47]
wire _shin_T_3; // @[package.scala:16:47]
assign _shin_T_3 = _GEN_2; // @[package.scala:16:47]
wire _shout_T_3; // @[ALU.scala:109:64]
assign _shout_T_3 = _GEN_2; // @[package.scala:16:47]
wire _bext_mask_T; // @[ALU.scala:122:52]
assign _bext_mask_T = _GEN_2; // @[package.scala:16:47]
wire _shin_T_4 = _shin_T | _shin_T_1; // @[package.scala:16:47, :81:59]
wire _shin_T_5 = _shin_T_4 | _shin_T_2; // @[package.scala:16:47, :81:59]
wire _shin_T_6 = _shin_T_5 | _shin_T_3; // @[package.scala:16:47, :81:59]
wire _shin_T_7 = ~_shin_T_6; // @[package.scala:81:59]
wire [31:0] _shin_T_10 = shin_r[63:32]; // @[ALU.scala:104:18, :106:46]
wire [31:0] _rotin_T_3 = shin_r[63:32]; // @[ALU.scala:104:18, :106:46, :156:44]
wire [63:0] _shin_T_11 = {32'h0, _shin_T_10}; // @[ALU.scala:106:46]
wire [31:0] _shin_T_12 = shin_r[31:0]; // @[ALU.scala:104:18, :106:46]
wire [31:0] _rotin_T_5 = shin_r[31:0]; // @[ALU.scala:104:18, :106:46, :156:44]
wire [63:0] _shin_T_13 = {_shin_T_12, 32'h0}; // @[ALU.scala:106:46]
wire [63:0] _shin_T_15 = _shin_T_13 & 64'hFFFFFFFF00000000; // @[ALU.scala:106:46]
wire [63:0] _shin_T_16 = _shin_T_11 | _shin_T_15; // @[ALU.scala:106:46]
wire [47:0] _shin_T_20 = _shin_T_16[63:16]; // @[ALU.scala:106:46]
wire [63:0] _shin_T_21 = {16'h0, _shin_T_20 & 48'hFFFF0000FFFF}; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [47:0] _shin_T_22 = _shin_T_16[47:0]; // @[ALU.scala:106:46]
wire [63:0] _shin_T_23 = {_shin_T_22, 16'h0}; // @[ALU.scala:106:46]
wire [63:0] _shin_T_25 = _shin_T_23 & 64'hFFFF0000FFFF0000; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shin_T_26 = _shin_T_21 | _shin_T_25; // @[ALU.scala:106:46]
wire [55:0] _shin_T_30 = _shin_T_26[63:8]; // @[ALU.scala:106:46]
wire [63:0] _shin_T_31 = {8'h0, _shin_T_30 & 56'hFF00FF00FF00FF}; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [55:0] _shin_T_32 = _shin_T_26[55:0]; // @[ALU.scala:106:46]
wire [63:0] _shin_T_33 = {_shin_T_32, 8'h0}; // @[ALU.scala:106:46]
wire [63:0] _shin_T_35 = _shin_T_33 & 64'hFF00FF00FF00FF00; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shin_T_36 = _shin_T_31 | _shin_T_35; // @[ALU.scala:106:46]
wire [59:0] _shin_T_40 = _shin_T_36[63:4]; // @[ALU.scala:106:46]
wire [63:0] _shin_T_41 = {4'h0, _shin_T_40 & 60'hF0F0F0F0F0F0F0F}; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [59:0] _shin_T_42 = _shin_T_36[59:0]; // @[ALU.scala:106:46]
wire [63:0] _shin_T_43 = {_shin_T_42, 4'h0}; // @[ALU.scala:106:46]
wire [63:0] _shin_T_45 = _shin_T_43 & 64'hF0F0F0F0F0F0F0F0; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shin_T_46 = _shin_T_41 | _shin_T_45; // @[ALU.scala:106:46]
wire [61:0] _shin_T_50 = _shin_T_46[63:2]; // @[ALU.scala:106:46]
wire [63:0] _shin_T_51 = {2'h0, _shin_T_50 & 62'h3333333333333333}; // @[package.scala:16:47]
wire [61:0] _shin_T_52 = _shin_T_46[61:0]; // @[ALU.scala:106:46]
wire [63:0] _shin_T_53 = {_shin_T_52, 2'h0}; // @[package.scala:16:47]
wire [63:0] _shin_T_55 = _shin_T_53 & 64'hCCCCCCCCCCCCCCCC; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shin_T_56 = _shin_T_51 | _shin_T_55; // @[ALU.scala:106:46]
wire [62:0] _shin_T_60 = _shin_T_56[63:1]; // @[ALU.scala:106:46]
wire [63:0] _shin_T_61 = {1'h0, _shin_T_60 & 63'h5555555555555555}; // @[ALU.scala:88:26, :106:46, :108:24, :132:19, :156:44, :158:25]
wire [62:0] _shin_T_62 = _shin_T_56[62:0]; // @[ALU.scala:106:46]
wire [63:0] _shin_T_63 = {_shin_T_62, 1'h0}; // @[ALU.scala:88:26, :106:46]
wire [63:0] _shin_T_65 = _shin_T_63 & 64'hAAAAAAAAAAAAAAAA; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shin_T_66 = _shin_T_61 | _shin_T_65; // @[ALU.scala:106:46]
wire [63:0] shin = _shin_T_7 ? _shin_T_66 : shin_r; // @[ALU.scala:64:33, :104:18, :106:{17,46}]
wire _shout_r_T_1 = shin[63]; // @[ALU.scala:106:17, :107:41]
wire _shout_r_T_2 = _shout_r_T & _shout_r_T_1; // @[ALU.scala:58:29, :107:{35,41}]
wire [64:0] _shout_r_T_3 = {_shout_r_T_2, shin}; // @[ALU.scala:106:17, :107:{21,35}]
wire [64:0] _shout_r_T_4 = _shout_r_T_3; // @[ALU.scala:107:{21,57}]
wire [64:0] _shout_r_T_5 = $signed($signed(_shout_r_T_4) >>> shamt); // @[ALU.scala:103:22, :107:{57,64}]
wire [63:0] shout_r = _shout_r_T_5[63:0]; // @[ALU.scala:107:{64,73}]
wire [31:0] _shout_l_T_2 = shout_r[63:32]; // @[ALU.scala:107:73, :108:24]
wire [63:0] _shout_l_T_3 = {32'h0, _shout_l_T_2}; // @[ALU.scala:108:24]
wire [31:0] _shout_l_T_4 = shout_r[31:0]; // @[ALU.scala:107:73, :108:24]
wire [63:0] _shout_l_T_5 = {_shout_l_T_4, 32'h0}; // @[ALU.scala:108:24]
wire [63:0] _shout_l_T_7 = _shout_l_T_5 & 64'hFFFFFFFF00000000; // @[ALU.scala:108:24]
wire [63:0] _shout_l_T_8 = _shout_l_T_3 | _shout_l_T_7; // @[ALU.scala:108:24]
wire [47:0] _shout_l_T_12 = _shout_l_T_8[63:16]; // @[ALU.scala:108:24]
wire [63:0] _shout_l_T_13 = {16'h0, _shout_l_T_12 & 48'hFFFF0000FFFF}; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [47:0] _shout_l_T_14 = _shout_l_T_8[47:0]; // @[ALU.scala:108:24]
wire [63:0] _shout_l_T_15 = {_shout_l_T_14, 16'h0}; // @[ALU.scala:106:46, :108:24]
wire [63:0] _shout_l_T_17 = _shout_l_T_15 & 64'hFFFF0000FFFF0000; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shout_l_T_18 = _shout_l_T_13 | _shout_l_T_17; // @[ALU.scala:108:24]
wire [55:0] _shout_l_T_22 = _shout_l_T_18[63:8]; // @[ALU.scala:108:24]
wire [63:0] _shout_l_T_23 = {8'h0, _shout_l_T_22 & 56'hFF00FF00FF00FF}; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [55:0] _shout_l_T_24 = _shout_l_T_18[55:0]; // @[ALU.scala:108:24]
wire [63:0] _shout_l_T_25 = {_shout_l_T_24, 8'h0}; // @[ALU.scala:108:24]
wire [63:0] _shout_l_T_27 = _shout_l_T_25 & 64'hFF00FF00FF00FF00; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shout_l_T_28 = _shout_l_T_23 | _shout_l_T_27; // @[ALU.scala:108:24]
wire [59:0] _shout_l_T_32 = _shout_l_T_28[63:4]; // @[ALU.scala:108:24]
wire [63:0] _shout_l_T_33 = {4'h0, _shout_l_T_32 & 60'hF0F0F0F0F0F0F0F}; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [59:0] _shout_l_T_34 = _shout_l_T_28[59:0]; // @[ALU.scala:108:24]
wire [63:0] _shout_l_T_35 = {_shout_l_T_34, 4'h0}; // @[ALU.scala:106:46, :108:24]
wire [63:0] _shout_l_T_37 = _shout_l_T_35 & 64'hF0F0F0F0F0F0F0F0; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shout_l_T_38 = _shout_l_T_33 | _shout_l_T_37; // @[ALU.scala:108:24]
wire [61:0] _shout_l_T_42 = _shout_l_T_38[63:2]; // @[ALU.scala:108:24]
wire [63:0] _shout_l_T_43 = {2'h0, _shout_l_T_42 & 62'h3333333333333333}; // @[package.scala:16:47]
wire [61:0] _shout_l_T_44 = _shout_l_T_38[61:0]; // @[ALU.scala:108:24]
wire [63:0] _shout_l_T_45 = {_shout_l_T_44, 2'h0}; // @[package.scala:16:47]
wire [63:0] _shout_l_T_47 = _shout_l_T_45 & 64'hCCCCCCCCCCCCCCCC; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _shout_l_T_48 = _shout_l_T_43 | _shout_l_T_47; // @[ALU.scala:108:24]
wire [62:0] _shout_l_T_52 = _shout_l_T_48[63:1]; // @[ALU.scala:108:24]
wire [63:0] _shout_l_T_53 = {1'h0, _shout_l_T_52 & 63'h5555555555555555}; // @[ALU.scala:88:26, :106:46, :108:24, :132:19, :156:44, :158:25]
wire [62:0] _shout_l_T_54 = _shout_l_T_48[62:0]; // @[ALU.scala:108:24]
wire [63:0] _shout_l_T_55 = {_shout_l_T_54, 1'h0}; // @[ALU.scala:88:26, :108:24]
wire [63:0] _shout_l_T_57 = _shout_l_T_55 & 64'hAAAAAAAAAAAAAAAA; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] shout_l = _shout_l_T_53 | _shout_l_T_57; // @[ALU.scala:108:24]
wire _shout_T_2 = _shout_T | _shout_T_1; // @[ALU.scala:109:{25,35,44}]
wire _shout_T_4 = _shout_T_2 | _shout_T_3; // @[ALU.scala:109:{35,55,64}]
wire [63:0] _shout_T_5 = _shout_T_4 ? shout_r : 64'h0; // @[ALU.scala:107:73, :109:{18,55}]
wire _shout_T_6 = io_fn_0 == 5'h1; // @[ALU.scala:83:7, :110:25]
wire [63:0] _shout_T_7 = _shout_T_6 ? shout_l : 64'h0; // @[ALU.scala:108:24, :110:{18,25}]
wire [63:0] shout = _shout_T_5 | _shout_T_7; // @[ALU.scala:109:{18,91}, :110:18]
wire in2_not_zero = |io_in2_0; // @[ALU.scala:83:7, :113:29]
wire _logic_T = io_fn_0 == 5'h4; // @[ALU.scala:83:7, :119:25]
wire _GEN_3 = io_fn_0 == 5'h6; // @[ALU.scala:83:7, :119:45]
wire _logic_T_1; // @[ALU.scala:119:45]
assign _logic_T_1 = _GEN_3; // @[ALU.scala:119:45]
wire _logic_T_8; // @[ALU.scala:120:25]
assign _logic_T_8 = _GEN_3; // @[ALU.scala:119:45, :120:25]
wire _logic_T_2 = _logic_T | _logic_T_1; // @[ALU.scala:119:{25,36,45}]
wire _GEN_4 = io_fn_0 == 5'h19; // @[ALU.scala:83:7, :119:64]
wire _logic_T_3; // @[ALU.scala:119:64]
assign _logic_T_3 = _GEN_4; // @[ALU.scala:119:64]
wire _logic_T_11; // @[ALU.scala:120:64]
assign _logic_T_11 = _GEN_4; // @[ALU.scala:119:64, :120:64]
wire _logic_T_4 = _logic_T_2 | _logic_T_3; // @[ALU.scala:119:{36,55,64}]
wire _logic_T_5 = io_fn_0 == 5'h1A; // @[ALU.scala:83:7, :119:84]
wire _logic_T_6 = _logic_T_4 | _logic_T_5; // @[ALU.scala:119:{55,75,84}]
wire [63:0] _logic_T_7 = _logic_T_6 ? in1_xor_in2 : 64'h0; // @[ALU.scala:86:28, :119:{18,75}]
wire _logic_T_9 = io_fn_0 == 5'h7; // @[ALU.scala:83:7, :120:44]
wire _logic_T_10 = _logic_T_8 | _logic_T_9; // @[ALU.scala:120:{25,35,44}]
wire _logic_T_12 = _logic_T_10 | _logic_T_11; // @[ALU.scala:120:{35,55,64}]
wire _logic_T_13 = io_fn_0 == 5'h18; // @[ALU.scala:83:7, :120:84]
wire _logic_T_14 = _logic_T_12 | _logic_T_13; // @[ALU.scala:120:{55,75,84}]
wire [63:0] _logic_T_15 = _logic_T_14 ? in1_and_in2 : 64'h0; // @[ALU.scala:87:28, :120:{18,75}]
wire [63:0] logic_0 = _logic_T_7 | _logic_T_15; // @[ALU.scala:119:{18,115}, :120:18]
wire _bext_mask_T_1 = _bext_mask_T; // @[ALU.scala:122:{43,52}]
wire [63:0] bext_mask = _bext_mask_T_1 ? 64'h1 : 64'hFFFFFFFFFFFFFFFF; // @[ALU.scala:122:{22,43}]
wire _shift_logic_T = io_fn_0 > 5'hB; // @[ALU.scala:59:31, :83:7]
wire _shift_logic_T_1 = ~(io_fn_0[4]); // @[ALU.scala:59:48, :83:7]
wire _shift_logic_T_2 = _shift_logic_T & _shift_logic_T_1; // @[ALU.scala:59:{31,41,48}]
wire _shift_logic_T_3 = _shift_logic_T_2 & slt; // @[ALU.scala:59:41, :92:8, :123:36]
wire [63:0] _shift_logic_T_4 = {63'h0, _shift_logic_T_3} | logic_0; // @[ALU.scala:119:115, :123:{36,44}]
wire [63:0] _shift_logic_T_5 = shout & bext_mask; // @[ALU.scala:109:91, :122:22, :123:61]
wire [63:0] shift_logic = _shift_logic_T_4 | _shift_logic_T_5; // @[ALU.scala:123:{44,52,61}]
wire _tz_in_T = ~io_dw_0; // @[ALU.scala:83:7, :130:32]
wire _tz_in_T_1 = io_in2_0[0]; // @[ALU.scala:83:7, :130:53]
wire _tz_in_T_2 = ~_tz_in_T_1; // @[ALU.scala:130:{46,53}]
wire [1:0] _tz_in_T_3 = {_tz_in_T, _tz_in_T_2}; // @[ALU.scala:130:{32,43,46}]
wire [63:0] _tz_in_T_7 = {32'h0, _tz_in_T_6}; // @[ALU.scala:132:19]
wire [63:0] _tz_in_T_9 = {_tz_in_T_8, 32'h0}; // @[ALU.scala:132:19]
wire [63:0] _tz_in_T_11 = _tz_in_T_9 & 64'hFFFFFFFF00000000; // @[ALU.scala:132:19]
wire [63:0] _tz_in_T_12 = _tz_in_T_7 | _tz_in_T_11; // @[ALU.scala:132:19]
wire [47:0] _tz_in_T_16 = _tz_in_T_12[63:16]; // @[ALU.scala:132:19]
wire [63:0] _tz_in_T_17 = {16'h0, _tz_in_T_16 & 48'hFFFF0000FFFF}; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [47:0] _tz_in_T_18 = _tz_in_T_12[47:0]; // @[ALU.scala:132:19]
wire [63:0] _tz_in_T_19 = {_tz_in_T_18, 16'h0}; // @[ALU.scala:106:46, :132:19]
wire [63:0] _tz_in_T_21 = _tz_in_T_19 & 64'hFFFF0000FFFF0000; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _tz_in_T_22 = _tz_in_T_17 | _tz_in_T_21; // @[ALU.scala:132:19]
wire [55:0] _tz_in_T_26 = _tz_in_T_22[63:8]; // @[ALU.scala:132:19]
wire [63:0] _tz_in_T_27 = {8'h0, _tz_in_T_26 & 56'hFF00FF00FF00FF}; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [55:0] _tz_in_T_28 = _tz_in_T_22[55:0]; // @[ALU.scala:132:19]
wire [63:0] _tz_in_T_29 = {_tz_in_T_28, 8'h0}; // @[ALU.scala:132:19]
wire [63:0] _tz_in_T_31 = _tz_in_T_29 & 64'hFF00FF00FF00FF00; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _tz_in_T_32 = _tz_in_T_27 | _tz_in_T_31; // @[ALU.scala:132:19]
wire [59:0] _tz_in_T_36 = _tz_in_T_32[63:4]; // @[ALU.scala:132:19]
wire [63:0] _tz_in_T_37 = {4'h0, _tz_in_T_36 & 60'hF0F0F0F0F0F0F0F}; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [59:0] _tz_in_T_38 = _tz_in_T_32[59:0]; // @[ALU.scala:132:19]
wire [63:0] _tz_in_T_39 = {_tz_in_T_38, 4'h0}; // @[ALU.scala:106:46, :132:19]
wire [63:0] _tz_in_T_41 = _tz_in_T_39 & 64'hF0F0F0F0F0F0F0F0; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _tz_in_T_42 = _tz_in_T_37 | _tz_in_T_41; // @[ALU.scala:132:19]
wire [61:0] _tz_in_T_46 = _tz_in_T_42[63:2]; // @[ALU.scala:132:19]
wire [63:0] _tz_in_T_47 = {2'h0, _tz_in_T_46 & 62'h3333333333333333}; // @[package.scala:16:47]
wire [61:0] _tz_in_T_48 = _tz_in_T_42[61:0]; // @[ALU.scala:132:19]
wire [63:0] _tz_in_T_49 = {_tz_in_T_48, 2'h0}; // @[package.scala:16:47]
wire [63:0] _tz_in_T_51 = _tz_in_T_49 & 64'hCCCCCCCCCCCCCCCC; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _tz_in_T_52 = _tz_in_T_47 | _tz_in_T_51; // @[ALU.scala:132:19]
wire [62:0] _tz_in_T_56 = _tz_in_T_52[63:1]; // @[ALU.scala:132:19]
wire [63:0] _tz_in_T_57 = {1'h0, _tz_in_T_56 & 63'h5555555555555555}; // @[ALU.scala:88:26, :106:46, :108:24, :132:19, :156:44, :158:25]
wire [62:0] _tz_in_T_58 = _tz_in_T_52[62:0]; // @[ALU.scala:132:19]
wire [63:0] _tz_in_T_59 = {_tz_in_T_58, 1'h0}; // @[ALU.scala:88:26, :132:19]
wire [63:0] _tz_in_T_61 = _tz_in_T_59 & 64'hAAAAAAAAAAAAAAAA; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _tz_in_T_62 = _tz_in_T_57 | _tz_in_T_61; // @[ALU.scala:132:19]
wire [32:0] _tz_in_T_64 = {1'h1, _tz_in_T_63}; // @[ALU.scala:133:{16,25}]
wire [15:0] _tz_in_T_68 = _tz_in_T_65[31:16]; // @[ALU.scala:134:{26,33}]
wire [31:0] _tz_in_T_69 = {16'h0, _tz_in_T_68}; // @[ALU.scala:106:46, :134:26]
wire [15:0] _tz_in_T_70 = _tz_in_T_65[15:0]; // @[ALU.scala:134:{26,33}]
wire [31:0] _tz_in_T_71 = {_tz_in_T_70, 16'h0}; // @[ALU.scala:106:46, :134:26]
wire [31:0] _tz_in_T_73 = _tz_in_T_71 & 32'hFFFF0000; // @[ALU.scala:134:26]
wire [31:0] _tz_in_T_74 = _tz_in_T_69 | _tz_in_T_73; // @[ALU.scala:134:26]
wire [23:0] _tz_in_T_78 = _tz_in_T_74[31:8]; // @[ALU.scala:134:26]
wire [31:0] _tz_in_T_79 = {8'h0, _tz_in_T_78 & 24'hFF00FF}; // @[ALU.scala:134:26]
wire [23:0] _tz_in_T_80 = _tz_in_T_74[23:0]; // @[ALU.scala:134:26]
wire [31:0] _tz_in_T_81 = {_tz_in_T_80, 8'h0}; // @[ALU.scala:134:26]
wire [31:0] _tz_in_T_83 = _tz_in_T_81 & 32'hFF00FF00; // @[ALU.scala:134:26]
wire [31:0] _tz_in_T_84 = _tz_in_T_79 | _tz_in_T_83; // @[ALU.scala:134:26]
wire [27:0] _tz_in_T_88 = _tz_in_T_84[31:4]; // @[ALU.scala:134:26]
wire [31:0] _tz_in_T_89 = {4'h0, _tz_in_T_88 & 28'hF0F0F0F}; // @[ALU.scala:106:46, :134:26]
wire [27:0] _tz_in_T_90 = _tz_in_T_84[27:0]; // @[ALU.scala:134:26]
wire [31:0] _tz_in_T_91 = {_tz_in_T_90, 4'h0}; // @[ALU.scala:106:46, :134:26]
wire [31:0] _tz_in_T_93 = _tz_in_T_91 & 32'hF0F0F0F0; // @[ALU.scala:134:26]
wire [31:0] _tz_in_T_94 = _tz_in_T_89 | _tz_in_T_93; // @[ALU.scala:134:26]
wire [29:0] _tz_in_T_98 = _tz_in_T_94[31:2]; // @[ALU.scala:134:26]
wire [31:0] _tz_in_T_99 = {2'h0, _tz_in_T_98 & 30'h33333333}; // @[package.scala:16:47]
wire [29:0] _tz_in_T_100 = _tz_in_T_94[29:0]; // @[ALU.scala:134:26]
wire [31:0] _tz_in_T_101 = {_tz_in_T_100, 2'h0}; // @[package.scala:16:47]
wire [31:0] _tz_in_T_103 = _tz_in_T_101 & 32'hCCCCCCCC; // @[ALU.scala:134:26]
wire [31:0] _tz_in_T_104 = _tz_in_T_99 | _tz_in_T_103; // @[ALU.scala:134:26]
wire [30:0] _tz_in_T_108 = _tz_in_T_104[31:1]; // @[ALU.scala:134:26]
wire [31:0] _tz_in_T_109 = {1'h0, _tz_in_T_108 & 31'h55555555}; // @[ALU.scala:88:26, :134:26]
wire [30:0] _tz_in_T_110 = _tz_in_T_104[30:0]; // @[ALU.scala:134:26]
wire [31:0] _tz_in_T_111 = {_tz_in_T_110, 1'h0}; // @[ALU.scala:88:26, :134:26]
wire [31:0] _tz_in_T_113 = _tz_in_T_111 & 32'hAAAAAAAA; // @[ALU.scala:134:26]
wire [31:0] _tz_in_T_114 = _tz_in_T_109 | _tz_in_T_113; // @[ALU.scala:134:26]
wire [32:0] _tz_in_T_115 = {1'h1, _tz_in_T_114}; // @[ALU.scala:134:{16,26}]
wire _tz_in_T_116 = _tz_in_T_3 == 2'h1; // @[ALU.scala:130:{43,62}]
wire [63:0] _tz_in_T_117 = _tz_in_T_116 ? _tz_in_T_62 : io_in1_0; // @[ALU.scala:83:7, :130:62, :132:19]
wire _tz_in_T_118 = _tz_in_T_3 == 2'h2; // @[ALU.scala:130:{43,62}]
wire [63:0] _tz_in_T_119 = _tz_in_T_118 ? {31'h0, _tz_in_T_64} : _tz_in_T_117; // @[ALU.scala:130:62, :133:16]
wire _tz_in_T_120 = &_tz_in_T_3; // @[ALU.scala:130:{43,62}]
wire [63:0] tz_in = _tz_in_T_120 ? {31'h0, _tz_in_T_115} : _tz_in_T_119; // @[ALU.scala:130:62, :134:16]
wire _popc_in_T = io_in2_0[1]; // @[ALU.scala:83:7, :136:27]
wire _popc_in_T_1 = ~io_dw_0; // @[ALU.scala:83:7, :130:32, :137:15]
wire [63:0] _popc_in_T_3 = _popc_in_T_1 ? {32'h0, _popc_in_T_2} : io_in1_0; // @[ALU.scala:83:7, :137:{8,15,32}]
wire [64:0] _popc_in_T_4 = {1'h1, tz_in}; // @[ALU.scala:130:62, :138:27]
wire _popc_in_T_5 = _popc_in_T_4[0]; // @[OneHot.scala:85:71]
wire _popc_in_T_6 = _popc_in_T_4[1]; // @[OneHot.scala:85:71]
wire _popc_in_T_7 = _popc_in_T_4[2]; // @[OneHot.scala:85:71]
wire _popc_in_T_8 = _popc_in_T_4[3]; // @[OneHot.scala:85:71]
wire _popc_in_T_9 = _popc_in_T_4[4]; // @[OneHot.scala:85:71]
wire _popc_in_T_10 = _popc_in_T_4[5]; // @[OneHot.scala:85:71]
wire _popc_in_T_11 = _popc_in_T_4[6]; // @[OneHot.scala:85:71]
wire _popc_in_T_12 = _popc_in_T_4[7]; // @[OneHot.scala:85:71]
wire _popc_in_T_13 = _popc_in_T_4[8]; // @[OneHot.scala:85:71]
wire _popc_in_T_14 = _popc_in_T_4[9]; // @[OneHot.scala:85:71]
wire _popc_in_T_15 = _popc_in_T_4[10]; // @[OneHot.scala:85:71]
wire _popc_in_T_16 = _popc_in_T_4[11]; // @[OneHot.scala:85:71]
wire _popc_in_T_17 = _popc_in_T_4[12]; // @[OneHot.scala:85:71]
wire _popc_in_T_18 = _popc_in_T_4[13]; // @[OneHot.scala:85:71]
wire _popc_in_T_19 = _popc_in_T_4[14]; // @[OneHot.scala:85:71]
wire _popc_in_T_20 = _popc_in_T_4[15]; // @[OneHot.scala:85:71]
wire _popc_in_T_21 = _popc_in_T_4[16]; // @[OneHot.scala:85:71]
wire _popc_in_T_22 = _popc_in_T_4[17]; // @[OneHot.scala:85:71]
wire _popc_in_T_23 = _popc_in_T_4[18]; // @[OneHot.scala:85:71]
wire _popc_in_T_24 = _popc_in_T_4[19]; // @[OneHot.scala:85:71]
wire _popc_in_T_25 = _popc_in_T_4[20]; // @[OneHot.scala:85:71]
wire _popc_in_T_26 = _popc_in_T_4[21]; // @[OneHot.scala:85:71]
wire _popc_in_T_27 = _popc_in_T_4[22]; // @[OneHot.scala:85:71]
wire _popc_in_T_28 = _popc_in_T_4[23]; // @[OneHot.scala:85:71]
wire _popc_in_T_29 = _popc_in_T_4[24]; // @[OneHot.scala:85:71]
wire _popc_in_T_30 = _popc_in_T_4[25]; // @[OneHot.scala:85:71]
wire _popc_in_T_31 = _popc_in_T_4[26]; // @[OneHot.scala:85:71]
wire _popc_in_T_32 = _popc_in_T_4[27]; // @[OneHot.scala:85:71]
wire _popc_in_T_33 = _popc_in_T_4[28]; // @[OneHot.scala:85:71]
wire _popc_in_T_34 = _popc_in_T_4[29]; // @[OneHot.scala:85:71]
wire _popc_in_T_35 = _popc_in_T_4[30]; // @[OneHot.scala:85:71]
wire _popc_in_T_36 = _popc_in_T_4[31]; // @[OneHot.scala:85:71]
wire _popc_in_T_37 = _popc_in_T_4[32]; // @[OneHot.scala:85:71]
wire _popc_in_T_38 = _popc_in_T_4[33]; // @[OneHot.scala:85:71]
wire _popc_in_T_39 = _popc_in_T_4[34]; // @[OneHot.scala:85:71]
wire _popc_in_T_40 = _popc_in_T_4[35]; // @[OneHot.scala:85:71]
wire _popc_in_T_41 = _popc_in_T_4[36]; // @[OneHot.scala:85:71]
wire _popc_in_T_42 = _popc_in_T_4[37]; // @[OneHot.scala:85:71]
wire _popc_in_T_43 = _popc_in_T_4[38]; // @[OneHot.scala:85:71]
wire _popc_in_T_44 = _popc_in_T_4[39]; // @[OneHot.scala:85:71]
wire _popc_in_T_45 = _popc_in_T_4[40]; // @[OneHot.scala:85:71]
wire _popc_in_T_46 = _popc_in_T_4[41]; // @[OneHot.scala:85:71]
wire _popc_in_T_47 = _popc_in_T_4[42]; // @[OneHot.scala:85:71]
wire _popc_in_T_48 = _popc_in_T_4[43]; // @[OneHot.scala:85:71]
wire _popc_in_T_49 = _popc_in_T_4[44]; // @[OneHot.scala:85:71]
wire _popc_in_T_50 = _popc_in_T_4[45]; // @[OneHot.scala:85:71]
wire _popc_in_T_51 = _popc_in_T_4[46]; // @[OneHot.scala:85:71]
wire _popc_in_T_52 = _popc_in_T_4[47]; // @[OneHot.scala:85:71]
wire _popc_in_T_53 = _popc_in_T_4[48]; // @[OneHot.scala:85:71]
wire _popc_in_T_54 = _popc_in_T_4[49]; // @[OneHot.scala:85:71]
wire _popc_in_T_55 = _popc_in_T_4[50]; // @[OneHot.scala:85:71]
wire _popc_in_T_56 = _popc_in_T_4[51]; // @[OneHot.scala:85:71]
wire _popc_in_T_57 = _popc_in_T_4[52]; // @[OneHot.scala:85:71]
wire _popc_in_T_58 = _popc_in_T_4[53]; // @[OneHot.scala:85:71]
wire _popc_in_T_59 = _popc_in_T_4[54]; // @[OneHot.scala:85:71]
wire _popc_in_T_60 = _popc_in_T_4[55]; // @[OneHot.scala:85:71]
wire _popc_in_T_61 = _popc_in_T_4[56]; // @[OneHot.scala:85:71]
wire _popc_in_T_62 = _popc_in_T_4[57]; // @[OneHot.scala:85:71]
wire _popc_in_T_63 = _popc_in_T_4[58]; // @[OneHot.scala:85:71]
wire _popc_in_T_64 = _popc_in_T_4[59]; // @[OneHot.scala:85:71]
wire _popc_in_T_65 = _popc_in_T_4[60]; // @[OneHot.scala:85:71]
wire _popc_in_T_66 = _popc_in_T_4[61]; // @[OneHot.scala:85:71]
wire _popc_in_T_67 = _popc_in_T_4[62]; // @[OneHot.scala:85:71]
wire _popc_in_T_68 = _popc_in_T_4[63]; // @[OneHot.scala:85:71]
wire _popc_in_T_69 = _popc_in_T_4[64]; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_70 = {_popc_in_T_69, 64'h0}; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_71 = _popc_in_T_68 ? 65'h8000000000000000 : _popc_in_T_70; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_72 = _popc_in_T_67 ? 65'h4000000000000000 : _popc_in_T_71; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_73 = _popc_in_T_66 ? 65'h2000000000000000 : _popc_in_T_72; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_74 = _popc_in_T_65 ? 65'h1000000000000000 : _popc_in_T_73; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_75 = _popc_in_T_64 ? 65'h800000000000000 : _popc_in_T_74; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_76 = _popc_in_T_63 ? 65'h400000000000000 : _popc_in_T_75; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_77 = _popc_in_T_62 ? 65'h200000000000000 : _popc_in_T_76; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_78 = _popc_in_T_61 ? 65'h100000000000000 : _popc_in_T_77; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_79 = _popc_in_T_60 ? 65'h80000000000000 : _popc_in_T_78; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_80 = _popc_in_T_59 ? 65'h40000000000000 : _popc_in_T_79; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_81 = _popc_in_T_58 ? 65'h20000000000000 : _popc_in_T_80; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_82 = _popc_in_T_57 ? 65'h10000000000000 : _popc_in_T_81; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_83 = _popc_in_T_56 ? 65'h8000000000000 : _popc_in_T_82; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_84 = _popc_in_T_55 ? 65'h4000000000000 : _popc_in_T_83; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_85 = _popc_in_T_54 ? 65'h2000000000000 : _popc_in_T_84; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_86 = _popc_in_T_53 ? 65'h1000000000000 : _popc_in_T_85; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_87 = _popc_in_T_52 ? 65'h800000000000 : _popc_in_T_86; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_88 = _popc_in_T_51 ? 65'h400000000000 : _popc_in_T_87; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_89 = _popc_in_T_50 ? 65'h200000000000 : _popc_in_T_88; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_90 = _popc_in_T_49 ? 65'h100000000000 : _popc_in_T_89; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_91 = _popc_in_T_48 ? 65'h80000000000 : _popc_in_T_90; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_92 = _popc_in_T_47 ? 65'h40000000000 : _popc_in_T_91; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_93 = _popc_in_T_46 ? 65'h20000000000 : _popc_in_T_92; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_94 = _popc_in_T_45 ? 65'h10000000000 : _popc_in_T_93; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_95 = _popc_in_T_44 ? 65'h8000000000 : _popc_in_T_94; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_96 = _popc_in_T_43 ? 65'h4000000000 : _popc_in_T_95; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_97 = _popc_in_T_42 ? 65'h2000000000 : _popc_in_T_96; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_98 = _popc_in_T_41 ? 65'h1000000000 : _popc_in_T_97; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_99 = _popc_in_T_40 ? 65'h800000000 : _popc_in_T_98; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_100 = _popc_in_T_39 ? 65'h400000000 : _popc_in_T_99; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_101 = _popc_in_T_38 ? 65'h200000000 : _popc_in_T_100; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_102 = _popc_in_T_37 ? 65'h100000000 : _popc_in_T_101; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_103 = _popc_in_T_36 ? 65'h80000000 : _popc_in_T_102; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_104 = _popc_in_T_35 ? 65'h40000000 : _popc_in_T_103; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_105 = _popc_in_T_34 ? 65'h20000000 : _popc_in_T_104; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_106 = _popc_in_T_33 ? 65'h10000000 : _popc_in_T_105; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_107 = _popc_in_T_32 ? 65'h8000000 : _popc_in_T_106; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_108 = _popc_in_T_31 ? 65'h4000000 : _popc_in_T_107; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_109 = _popc_in_T_30 ? 65'h2000000 : _popc_in_T_108; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_110 = _popc_in_T_29 ? 65'h1000000 : _popc_in_T_109; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_111 = _popc_in_T_28 ? 65'h800000 : _popc_in_T_110; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_112 = _popc_in_T_27 ? 65'h400000 : _popc_in_T_111; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_113 = _popc_in_T_26 ? 65'h200000 : _popc_in_T_112; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_114 = _popc_in_T_25 ? 65'h100000 : _popc_in_T_113; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_115 = _popc_in_T_24 ? 65'h80000 : _popc_in_T_114; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_116 = _popc_in_T_23 ? 65'h40000 : _popc_in_T_115; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_117 = _popc_in_T_22 ? 65'h20000 : _popc_in_T_116; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_118 = _popc_in_T_21 ? 65'h10000 : _popc_in_T_117; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_119 = _popc_in_T_20 ? 65'h8000 : _popc_in_T_118; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_120 = _popc_in_T_19 ? 65'h4000 : _popc_in_T_119; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_121 = _popc_in_T_18 ? 65'h2000 : _popc_in_T_120; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_122 = _popc_in_T_17 ? 65'h1000 : _popc_in_T_121; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_123 = _popc_in_T_16 ? 65'h800 : _popc_in_T_122; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_124 = _popc_in_T_15 ? 65'h400 : _popc_in_T_123; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_125 = _popc_in_T_14 ? 65'h200 : _popc_in_T_124; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_126 = _popc_in_T_13 ? 65'h100 : _popc_in_T_125; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_127 = _popc_in_T_12 ? 65'h80 : _popc_in_T_126; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_128 = _popc_in_T_11 ? 65'h40 : _popc_in_T_127; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_129 = _popc_in_T_10 ? 65'h20 : _popc_in_T_128; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_130 = _popc_in_T_9 ? 65'h10 : _popc_in_T_129; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_131 = _popc_in_T_8 ? 65'h8 : _popc_in_T_130; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_132 = _popc_in_T_7 ? 65'h4 : _popc_in_T_131; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_133 = _popc_in_T_6 ? 65'h2 : _popc_in_T_132; // @[OneHot.scala:85:71]
wire [64:0] _popc_in_T_134 = _popc_in_T_5 ? 65'h1 : _popc_in_T_133; // @[OneHot.scala:85:71]
wire [65:0] _popc_in_T_135 = {1'h0, _popc_in_T_134} - 66'h1; // @[Mux.scala:50:70]
wire [64:0] _popc_in_T_136 = _popc_in_T_135[64:0]; // @[ALU.scala:138:37]
wire [64:0] _popc_in_T_137 = _popc_in_T ? {1'h0, _popc_in_T_3} : _popc_in_T_136; // @[ALU.scala:88:26, :136:{20,27}, :137:8, :138:37]
wire [63:0] popc_in = _popc_in_T_137[63:0]; // @[ALU.scala:136:20, :138:43]
wire _count_T = popc_in[0]; // @[ALU.scala:138:43, :139:23]
wire _count_T_1 = popc_in[1]; // @[ALU.scala:138:43, :139:23]
wire _count_T_2 = popc_in[2]; // @[ALU.scala:138:43, :139:23]
wire _count_T_3 = popc_in[3]; // @[ALU.scala:138:43, :139:23]
wire _count_T_4 = popc_in[4]; // @[ALU.scala:138:43, :139:23]
wire _count_T_5 = popc_in[5]; // @[ALU.scala:138:43, :139:23]
wire _count_T_6 = popc_in[6]; // @[ALU.scala:138:43, :139:23]
wire _count_T_7 = popc_in[7]; // @[ALU.scala:138:43, :139:23]
wire _count_T_8 = popc_in[8]; // @[ALU.scala:138:43, :139:23]
wire _count_T_9 = popc_in[9]; // @[ALU.scala:138:43, :139:23]
wire _count_T_10 = popc_in[10]; // @[ALU.scala:138:43, :139:23]
wire _count_T_11 = popc_in[11]; // @[ALU.scala:138:43, :139:23]
wire _count_T_12 = popc_in[12]; // @[ALU.scala:138:43, :139:23]
wire _count_T_13 = popc_in[13]; // @[ALU.scala:138:43, :139:23]
wire _count_T_14 = popc_in[14]; // @[ALU.scala:138:43, :139:23]
wire _count_T_15 = popc_in[15]; // @[ALU.scala:138:43, :139:23]
wire _count_T_16 = popc_in[16]; // @[ALU.scala:138:43, :139:23]
wire _count_T_17 = popc_in[17]; // @[ALU.scala:138:43, :139:23]
wire _count_T_18 = popc_in[18]; // @[ALU.scala:138:43, :139:23]
wire _count_T_19 = popc_in[19]; // @[ALU.scala:138:43, :139:23]
wire _count_T_20 = popc_in[20]; // @[ALU.scala:138:43, :139:23]
wire _count_T_21 = popc_in[21]; // @[ALU.scala:138:43, :139:23]
wire _count_T_22 = popc_in[22]; // @[ALU.scala:138:43, :139:23]
wire _count_T_23 = popc_in[23]; // @[ALU.scala:138:43, :139:23]
wire _count_T_24 = popc_in[24]; // @[ALU.scala:138:43, :139:23]
wire _count_T_25 = popc_in[25]; // @[ALU.scala:138:43, :139:23]
wire _count_T_26 = popc_in[26]; // @[ALU.scala:138:43, :139:23]
wire _count_T_27 = popc_in[27]; // @[ALU.scala:138:43, :139:23]
wire _count_T_28 = popc_in[28]; // @[ALU.scala:138:43, :139:23]
wire _count_T_29 = popc_in[29]; // @[ALU.scala:138:43, :139:23]
wire _count_T_30 = popc_in[30]; // @[ALU.scala:138:43, :139:23]
wire _count_T_31 = popc_in[31]; // @[ALU.scala:138:43, :139:23]
wire _count_T_32 = popc_in[32]; // @[ALU.scala:138:43, :139:23]
wire _count_T_33 = popc_in[33]; // @[ALU.scala:138:43, :139:23]
wire _count_T_34 = popc_in[34]; // @[ALU.scala:138:43, :139:23]
wire _count_T_35 = popc_in[35]; // @[ALU.scala:138:43, :139:23]
wire _count_T_36 = popc_in[36]; // @[ALU.scala:138:43, :139:23]
wire _count_T_37 = popc_in[37]; // @[ALU.scala:138:43, :139:23]
wire _count_T_38 = popc_in[38]; // @[ALU.scala:138:43, :139:23]
wire _count_T_39 = popc_in[39]; // @[ALU.scala:138:43, :139:23]
wire _count_T_40 = popc_in[40]; // @[ALU.scala:138:43, :139:23]
wire _count_T_41 = popc_in[41]; // @[ALU.scala:138:43, :139:23]
wire _count_T_42 = popc_in[42]; // @[ALU.scala:138:43, :139:23]
wire _count_T_43 = popc_in[43]; // @[ALU.scala:138:43, :139:23]
wire _count_T_44 = popc_in[44]; // @[ALU.scala:138:43, :139:23]
wire _count_T_45 = popc_in[45]; // @[ALU.scala:138:43, :139:23]
wire _count_T_46 = popc_in[46]; // @[ALU.scala:138:43, :139:23]
wire _count_T_47 = popc_in[47]; // @[ALU.scala:138:43, :139:23]
wire _count_T_48 = popc_in[48]; // @[ALU.scala:138:43, :139:23]
wire _count_T_49 = popc_in[49]; // @[ALU.scala:138:43, :139:23]
wire _count_T_50 = popc_in[50]; // @[ALU.scala:138:43, :139:23]
wire _count_T_51 = popc_in[51]; // @[ALU.scala:138:43, :139:23]
wire _count_T_52 = popc_in[52]; // @[ALU.scala:138:43, :139:23]
wire _count_T_53 = popc_in[53]; // @[ALU.scala:138:43, :139:23]
wire _count_T_54 = popc_in[54]; // @[ALU.scala:138:43, :139:23]
wire _count_T_55 = popc_in[55]; // @[ALU.scala:138:43, :139:23]
wire _count_T_56 = popc_in[56]; // @[ALU.scala:138:43, :139:23]
wire _count_T_57 = popc_in[57]; // @[ALU.scala:138:43, :139:23]
wire _count_T_58 = popc_in[58]; // @[ALU.scala:138:43, :139:23]
wire _count_T_59 = popc_in[59]; // @[ALU.scala:138:43, :139:23]
wire _count_T_60 = popc_in[60]; // @[ALU.scala:138:43, :139:23]
wire _count_T_61 = popc_in[61]; // @[ALU.scala:138:43, :139:23]
wire _count_T_62 = popc_in[62]; // @[ALU.scala:138:43, :139:23]
wire _count_T_63 = popc_in[63]; // @[ALU.scala:138:43, :139:23]
wire [1:0] _count_T_64 = {1'h0, _count_T} + {1'h0, _count_T_1}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_65 = _count_T_64; // @[ALU.scala:139:23]
wire [1:0] _count_T_66 = {1'h0, _count_T_2} + {1'h0, _count_T_3}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_67 = _count_T_66; // @[ALU.scala:139:23]
wire [2:0] _count_T_68 = {1'h0, _count_T_65} + {1'h0, _count_T_67}; // @[ALU.scala:88:26, :139:23]
wire [2:0] _count_T_69 = _count_T_68; // @[ALU.scala:139:23]
wire [1:0] _count_T_70 = {1'h0, _count_T_4} + {1'h0, _count_T_5}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_71 = _count_T_70; // @[ALU.scala:139:23]
wire [1:0] _count_T_72 = {1'h0, _count_T_6} + {1'h0, _count_T_7}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_73 = _count_T_72; // @[ALU.scala:139:23]
wire [2:0] _count_T_74 = {1'h0, _count_T_71} + {1'h0, _count_T_73}; // @[ALU.scala:88:26, :139:23]
wire [2:0] _count_T_75 = _count_T_74; // @[ALU.scala:139:23]
wire [3:0] _count_T_76 = {1'h0, _count_T_69} + {1'h0, _count_T_75}; // @[ALU.scala:88:26, :139:23]
wire [3:0] _count_T_77 = _count_T_76; // @[ALU.scala:139:23]
wire [1:0] _count_T_78 = {1'h0, _count_T_8} + {1'h0, _count_T_9}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_79 = _count_T_78; // @[ALU.scala:139:23]
wire [1:0] _count_T_80 = {1'h0, _count_T_10} + {1'h0, _count_T_11}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_81 = _count_T_80; // @[ALU.scala:139:23]
wire [2:0] _count_T_82 = {1'h0, _count_T_79} + {1'h0, _count_T_81}; // @[ALU.scala:88:26, :139:23]
wire [2:0] _count_T_83 = _count_T_82; // @[ALU.scala:139:23]
wire [1:0] _count_T_84 = {1'h0, _count_T_12} + {1'h0, _count_T_13}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_85 = _count_T_84; // @[ALU.scala:139:23]
wire [1:0] _count_T_86 = {1'h0, _count_T_14} + {1'h0, _count_T_15}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_87 = _count_T_86; // @[ALU.scala:139:23]
wire [2:0] _count_T_88 = {1'h0, _count_T_85} + {1'h0, _count_T_87}; // @[ALU.scala:88:26, :139:23]
wire [2:0] _count_T_89 = _count_T_88; // @[ALU.scala:139:23]
wire [3:0] _count_T_90 = {1'h0, _count_T_83} + {1'h0, _count_T_89}; // @[ALU.scala:88:26, :139:23]
wire [3:0] _count_T_91 = _count_T_90; // @[ALU.scala:139:23]
wire [4:0] _count_T_92 = {1'h0, _count_T_77} + {1'h0, _count_T_91}; // @[ALU.scala:88:26, :139:23]
wire [4:0] _count_T_93 = _count_T_92; // @[ALU.scala:139:23]
wire [1:0] _count_T_94 = {1'h0, _count_T_16} + {1'h0, _count_T_17}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_95 = _count_T_94; // @[ALU.scala:139:23]
wire [1:0] _count_T_96 = {1'h0, _count_T_18} + {1'h0, _count_T_19}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_97 = _count_T_96; // @[ALU.scala:139:23]
wire [2:0] _count_T_98 = {1'h0, _count_T_95} + {1'h0, _count_T_97}; // @[ALU.scala:88:26, :139:23]
wire [2:0] _count_T_99 = _count_T_98; // @[ALU.scala:139:23]
wire [1:0] _count_T_100 = {1'h0, _count_T_20} + {1'h0, _count_T_21}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_101 = _count_T_100; // @[ALU.scala:139:23]
wire [1:0] _count_T_102 = {1'h0, _count_T_22} + {1'h0, _count_T_23}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_103 = _count_T_102; // @[ALU.scala:139:23]
wire [2:0] _count_T_104 = {1'h0, _count_T_101} + {1'h0, _count_T_103}; // @[ALU.scala:88:26, :139:23]
wire [2:0] _count_T_105 = _count_T_104; // @[ALU.scala:139:23]
wire [3:0] _count_T_106 = {1'h0, _count_T_99} + {1'h0, _count_T_105}; // @[ALU.scala:88:26, :139:23]
wire [3:0] _count_T_107 = _count_T_106; // @[ALU.scala:139:23]
wire [1:0] _count_T_108 = {1'h0, _count_T_24} + {1'h0, _count_T_25}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_109 = _count_T_108; // @[ALU.scala:139:23]
wire [1:0] _count_T_110 = {1'h0, _count_T_26} + {1'h0, _count_T_27}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_111 = _count_T_110; // @[ALU.scala:139:23]
wire [2:0] _count_T_112 = {1'h0, _count_T_109} + {1'h0, _count_T_111}; // @[ALU.scala:88:26, :139:23]
wire [2:0] _count_T_113 = _count_T_112; // @[ALU.scala:139:23]
wire [1:0] _count_T_114 = {1'h0, _count_T_28} + {1'h0, _count_T_29}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_115 = _count_T_114; // @[ALU.scala:139:23]
wire [1:0] _count_T_116 = {1'h0, _count_T_30} + {1'h0, _count_T_31}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_117 = _count_T_116; // @[ALU.scala:139:23]
wire [2:0] _count_T_118 = {1'h0, _count_T_115} + {1'h0, _count_T_117}; // @[ALU.scala:88:26, :139:23]
wire [2:0] _count_T_119 = _count_T_118; // @[ALU.scala:139:23]
wire [3:0] _count_T_120 = {1'h0, _count_T_113} + {1'h0, _count_T_119}; // @[ALU.scala:88:26, :139:23]
wire [3:0] _count_T_121 = _count_T_120; // @[ALU.scala:139:23]
wire [4:0] _count_T_122 = {1'h0, _count_T_107} + {1'h0, _count_T_121}; // @[ALU.scala:88:26, :139:23]
wire [4:0] _count_T_123 = _count_T_122; // @[ALU.scala:139:23]
wire [5:0] _count_T_124 = {1'h0, _count_T_93} + {1'h0, _count_T_123}; // @[ALU.scala:88:26, :139:23]
wire [5:0] _count_T_125 = _count_T_124; // @[ALU.scala:139:23]
wire [1:0] _count_T_126 = {1'h0, _count_T_32} + {1'h0, _count_T_33}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_127 = _count_T_126; // @[ALU.scala:139:23]
wire [1:0] _count_T_128 = {1'h0, _count_T_34} + {1'h0, _count_T_35}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_129 = _count_T_128; // @[ALU.scala:139:23]
wire [2:0] _count_T_130 = {1'h0, _count_T_127} + {1'h0, _count_T_129}; // @[ALU.scala:88:26, :139:23]
wire [2:0] _count_T_131 = _count_T_130; // @[ALU.scala:139:23]
wire [1:0] _count_T_132 = {1'h0, _count_T_36} + {1'h0, _count_T_37}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_133 = _count_T_132; // @[ALU.scala:139:23]
wire [1:0] _count_T_134 = {1'h0, _count_T_38} + {1'h0, _count_T_39}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_135 = _count_T_134; // @[ALU.scala:139:23]
wire [2:0] _count_T_136 = {1'h0, _count_T_133} + {1'h0, _count_T_135}; // @[ALU.scala:88:26, :139:23]
wire [2:0] _count_T_137 = _count_T_136; // @[ALU.scala:139:23]
wire [3:0] _count_T_138 = {1'h0, _count_T_131} + {1'h0, _count_T_137}; // @[ALU.scala:88:26, :139:23]
wire [3:0] _count_T_139 = _count_T_138; // @[ALU.scala:139:23]
wire [1:0] _count_T_140 = {1'h0, _count_T_40} + {1'h0, _count_T_41}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_141 = _count_T_140; // @[ALU.scala:139:23]
wire [1:0] _count_T_142 = {1'h0, _count_T_42} + {1'h0, _count_T_43}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_143 = _count_T_142; // @[ALU.scala:139:23]
wire [2:0] _count_T_144 = {1'h0, _count_T_141} + {1'h0, _count_T_143}; // @[ALU.scala:88:26, :139:23]
wire [2:0] _count_T_145 = _count_T_144; // @[ALU.scala:139:23]
wire [1:0] _count_T_146 = {1'h0, _count_T_44} + {1'h0, _count_T_45}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_147 = _count_T_146; // @[ALU.scala:139:23]
wire [1:0] _count_T_148 = {1'h0, _count_T_46} + {1'h0, _count_T_47}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_149 = _count_T_148; // @[ALU.scala:139:23]
wire [2:0] _count_T_150 = {1'h0, _count_T_147} + {1'h0, _count_T_149}; // @[ALU.scala:88:26, :139:23]
wire [2:0] _count_T_151 = _count_T_150; // @[ALU.scala:139:23]
wire [3:0] _count_T_152 = {1'h0, _count_T_145} + {1'h0, _count_T_151}; // @[ALU.scala:88:26, :139:23]
wire [3:0] _count_T_153 = _count_T_152; // @[ALU.scala:139:23]
wire [4:0] _count_T_154 = {1'h0, _count_T_139} + {1'h0, _count_T_153}; // @[ALU.scala:88:26, :139:23]
wire [4:0] _count_T_155 = _count_T_154; // @[ALU.scala:139:23]
wire [1:0] _count_T_156 = {1'h0, _count_T_48} + {1'h0, _count_T_49}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_157 = _count_T_156; // @[ALU.scala:139:23]
wire [1:0] _count_T_158 = {1'h0, _count_T_50} + {1'h0, _count_T_51}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_159 = _count_T_158; // @[ALU.scala:139:23]
wire [2:0] _count_T_160 = {1'h0, _count_T_157} + {1'h0, _count_T_159}; // @[ALU.scala:88:26, :139:23]
wire [2:0] _count_T_161 = _count_T_160; // @[ALU.scala:139:23]
wire [1:0] _count_T_162 = {1'h0, _count_T_52} + {1'h0, _count_T_53}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_163 = _count_T_162; // @[ALU.scala:139:23]
wire [1:0] _count_T_164 = {1'h0, _count_T_54} + {1'h0, _count_T_55}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_165 = _count_T_164; // @[ALU.scala:139:23]
wire [2:0] _count_T_166 = {1'h0, _count_T_163} + {1'h0, _count_T_165}; // @[ALU.scala:88:26, :139:23]
wire [2:0] _count_T_167 = _count_T_166; // @[ALU.scala:139:23]
wire [3:0] _count_T_168 = {1'h0, _count_T_161} + {1'h0, _count_T_167}; // @[ALU.scala:88:26, :139:23]
wire [3:0] _count_T_169 = _count_T_168; // @[ALU.scala:139:23]
wire [1:0] _count_T_170 = {1'h0, _count_T_56} + {1'h0, _count_T_57}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_171 = _count_T_170; // @[ALU.scala:139:23]
wire [1:0] _count_T_172 = {1'h0, _count_T_58} + {1'h0, _count_T_59}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_173 = _count_T_172; // @[ALU.scala:139:23]
wire [2:0] _count_T_174 = {1'h0, _count_T_171} + {1'h0, _count_T_173}; // @[ALU.scala:88:26, :139:23]
wire [2:0] _count_T_175 = _count_T_174; // @[ALU.scala:139:23]
wire [1:0] _count_T_176 = {1'h0, _count_T_60} + {1'h0, _count_T_61}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_177 = _count_T_176; // @[ALU.scala:139:23]
wire [1:0] _count_T_178 = {1'h0, _count_T_62} + {1'h0, _count_T_63}; // @[ALU.scala:88:26, :139:23]
wire [1:0] _count_T_179 = _count_T_178; // @[ALU.scala:139:23]
wire [2:0] _count_T_180 = {1'h0, _count_T_177} + {1'h0, _count_T_179}; // @[ALU.scala:88:26, :139:23]
wire [2:0] _count_T_181 = _count_T_180; // @[ALU.scala:139:23]
wire [3:0] _count_T_182 = {1'h0, _count_T_175} + {1'h0, _count_T_181}; // @[ALU.scala:88:26, :139:23]
wire [3:0] _count_T_183 = _count_T_182; // @[ALU.scala:139:23]
wire [4:0] _count_T_184 = {1'h0, _count_T_169} + {1'h0, _count_T_183}; // @[ALU.scala:88:26, :139:23]
wire [4:0] _count_T_185 = _count_T_184; // @[ALU.scala:139:23]
wire [5:0] _count_T_186 = {1'h0, _count_T_155} + {1'h0, _count_T_185}; // @[ALU.scala:88:26, :139:23]
wire [5:0] _count_T_187 = _count_T_186; // @[ALU.scala:139:23]
wire [6:0] _count_T_188 = {1'h0, _count_T_125} + {1'h0, _count_T_187}; // @[ALU.scala:88:26, :139:23]
wire [6:0] count = _count_T_188; // @[ALU.scala:139:23]
wire [7:0] _in1_bytes_T; // @[ALU.scala:140:34]
wire [7:0] _in1_bytes_T_1; // @[ALU.scala:140:34]
wire [7:0] _rev8_WIRE_7 = in1_bytes_0; // @[ALU.scala:140:34, :142:21]
wire [7:0] _in1_bytes_T_2; // @[ALU.scala:140:34]
wire [7:0] _rev8_WIRE_6 = in1_bytes_1; // @[ALU.scala:140:34, :142:21]
wire [7:0] _in1_bytes_T_3; // @[ALU.scala:140:34]
wire [7:0] _rev8_WIRE_5 = in1_bytes_2; // @[ALU.scala:140:34, :142:21]
wire [7:0] _in1_bytes_T_4; // @[ALU.scala:140:34]
wire [7:0] _rev8_WIRE_4 = in1_bytes_3; // @[ALU.scala:140:34, :142:21]
wire [7:0] _in1_bytes_T_5; // @[ALU.scala:140:34]
wire [7:0] _rev8_WIRE_3 = in1_bytes_4; // @[ALU.scala:140:34, :142:21]
wire [7:0] _in1_bytes_T_6; // @[ALU.scala:140:34]
wire [7:0] _rev8_WIRE_2 = in1_bytes_5; // @[ALU.scala:140:34, :142:21]
wire [7:0] _in1_bytes_T_7; // @[ALU.scala:140:34]
wire [7:0] _rev8_WIRE_1 = in1_bytes_6; // @[ALU.scala:140:34, :142:21]
wire [7:0] in1_bytes_7; // @[ALU.scala:140:34]
wire [7:0] _rev8_WIRE_0 = in1_bytes_7; // @[ALU.scala:140:34, :142:21]
assign _in1_bytes_T = _in1_bytes_WIRE[7:0]; // @[ALU.scala:140:34]
assign in1_bytes_0 = _in1_bytes_T; // @[ALU.scala:140:34]
assign _in1_bytes_T_1 = _in1_bytes_WIRE[15:8]; // @[ALU.scala:140:34]
assign in1_bytes_1 = _in1_bytes_T_1; // @[ALU.scala:140:34]
assign _in1_bytes_T_2 = _in1_bytes_WIRE[23:16]; // @[ALU.scala:140:34]
assign in1_bytes_2 = _in1_bytes_T_2; // @[ALU.scala:140:34]
assign _in1_bytes_T_3 = _in1_bytes_WIRE[31:24]; // @[ALU.scala:140:34]
assign in1_bytes_3 = _in1_bytes_T_3; // @[ALU.scala:140:34]
assign _in1_bytes_T_4 = _in1_bytes_WIRE[39:32]; // @[ALU.scala:140:34]
assign in1_bytes_4 = _in1_bytes_T_4; // @[ALU.scala:140:34]
assign _in1_bytes_T_5 = _in1_bytes_WIRE[47:40]; // @[ALU.scala:140:34]
assign in1_bytes_5 = _in1_bytes_T_5; // @[ALU.scala:140:34]
assign _in1_bytes_T_6 = _in1_bytes_WIRE[55:48]; // @[ALU.scala:140:34]
assign in1_bytes_6 = _in1_bytes_T_6; // @[ALU.scala:140:34]
assign _in1_bytes_T_7 = _in1_bytes_WIRE[63:56]; // @[ALU.scala:140:34]
assign in1_bytes_7 = _in1_bytes_T_7; // @[ALU.scala:140:34]
wire _orcb_T = |in1_bytes_0; // @[ALU.scala:140:34, :141:51]
wire [7:0] _orcb_T_1 = {8{_orcb_T}}; // @[ALU.scala:141:{45,51}]
wire [7:0] _orcb_WIRE_0 = _orcb_T_1; // @[ALU.scala:141:{21,45}]
wire _orcb_T_2 = |in1_bytes_1; // @[ALU.scala:140:34, :141:51]
wire [7:0] _orcb_T_3 = {8{_orcb_T_2}}; // @[ALU.scala:141:{45,51}]
wire [7:0] _orcb_WIRE_1 = _orcb_T_3; // @[ALU.scala:141:{21,45}]
wire _orcb_T_4 = |in1_bytes_2; // @[ALU.scala:140:34, :141:51]
wire [7:0] _orcb_T_5 = {8{_orcb_T_4}}; // @[ALU.scala:141:{45,51}]
wire [7:0] _orcb_WIRE_2 = _orcb_T_5; // @[ALU.scala:141:{21,45}]
wire _orcb_T_6 = |in1_bytes_3; // @[ALU.scala:140:34, :141:51]
wire [7:0] _orcb_T_7 = {8{_orcb_T_6}}; // @[ALU.scala:141:{45,51}]
wire [7:0] _orcb_WIRE_3 = _orcb_T_7; // @[ALU.scala:141:{21,45}]
wire _orcb_T_8 = |in1_bytes_4; // @[ALU.scala:140:34, :141:51]
wire [7:0] _orcb_T_9 = {8{_orcb_T_8}}; // @[ALU.scala:141:{45,51}]
wire [7:0] _orcb_WIRE_4 = _orcb_T_9; // @[ALU.scala:141:{21,45}]
wire _orcb_T_10 = |in1_bytes_5; // @[ALU.scala:140:34, :141:51]
wire [7:0] _orcb_T_11 = {8{_orcb_T_10}}; // @[ALU.scala:141:{45,51}]
wire [7:0] _orcb_WIRE_5 = _orcb_T_11; // @[ALU.scala:141:{21,45}]
wire _orcb_T_12 = |in1_bytes_6; // @[ALU.scala:140:34, :141:51]
wire [7:0] _orcb_T_13 = {8{_orcb_T_12}}; // @[ALU.scala:141:{45,51}]
wire [7:0] _orcb_WIRE_6 = _orcb_T_13; // @[ALU.scala:141:{21,45}]
wire _orcb_T_14 = |in1_bytes_7; // @[ALU.scala:140:34, :141:51]
wire [7:0] _orcb_T_15 = {8{_orcb_T_14}}; // @[ALU.scala:141:{45,51}]
wire [7:0] _orcb_WIRE_7 = _orcb_T_15; // @[ALU.scala:141:{21,45}]
wire [15:0] orcb_lo_lo = {_orcb_WIRE_1, _orcb_WIRE_0}; // @[ALU.scala:141:{21,62}]
wire [15:0] orcb_lo_hi = {_orcb_WIRE_3, _orcb_WIRE_2}; // @[ALU.scala:141:{21,62}]
wire [31:0] orcb_lo = {orcb_lo_hi, orcb_lo_lo}; // @[ALU.scala:141:62]
wire [15:0] orcb_hi_lo = {_orcb_WIRE_5, _orcb_WIRE_4}; // @[ALU.scala:141:{21,62}]
wire [15:0] orcb_hi_hi = {_orcb_WIRE_7, _orcb_WIRE_6}; // @[ALU.scala:141:{21,62}]
wire [31:0] orcb_hi = {orcb_hi_hi, orcb_hi_lo}; // @[ALU.scala:141:62]
wire [63:0] orcb = {orcb_hi, orcb_lo}; // @[ALU.scala:141:62]
wire [15:0] rev8_lo_lo = {_rev8_WIRE_1, _rev8_WIRE_0}; // @[ALU.scala:142:{21,41}]
wire [15:0] rev8_lo_hi = {_rev8_WIRE_3, _rev8_WIRE_2}; // @[ALU.scala:142:{21,41}]
wire [31:0] rev8_lo = {rev8_lo_hi, rev8_lo_lo}; // @[ALU.scala:142:41]
wire [15:0] rev8_hi_lo = {_rev8_WIRE_5, _rev8_WIRE_4}; // @[ALU.scala:142:{21,41}]
wire [15:0] rev8_hi_hi = {_rev8_WIRE_7, _rev8_WIRE_6}; // @[ALU.scala:142:{21,41}]
wire [31:0] rev8_hi = {rev8_hi_hi, rev8_hi_lo}; // @[ALU.scala:142:41]
wire [63:0] rev8 = {rev8_hi, rev8_lo}; // @[ALU.scala:142:41]
wire [11:0] _unary_T = io_in2_0[11:0]; // @[ALU.scala:83:7, :143:31]
wire [15:0] _unary_T_1 = io_in1_0[15:0]; // @[ALU.scala:83:7, :146:22]
wire [15:0] _unary_T_8 = io_in1_0[15:0]; // @[ALU.scala:83:7, :146:22, :148:51]
wire _unary_T_2 = io_in1_0[7]; // @[ALU.scala:83:7, :147:35]
wire [55:0] _unary_T_3 = {56{_unary_T_2}}; // @[ALU.scala:147:{20,35}]
wire [7:0] _unary_T_4 = io_in1_0[7:0]; // @[ALU.scala:83:7, :147:49]
wire [63:0] _unary_T_5 = {_unary_T_3, _unary_T_4}; // @[ALU.scala:147:{20,40,49}]
wire _unary_T_6 = io_in1_0[15]; // @[ALU.scala:83:7, :148:36]
wire [47:0] _unary_T_7 = {48{_unary_T_6}}; // @[ALU.scala:148:{20,36}]
wire [63:0] _unary_T_9 = {_unary_T_7, _unary_T_8}; // @[ALU.scala:148:{20,42,51}]
wire _unary_T_10 = _unary_T == 12'h287; // @[ALU.scala:143:{31,45}]
wire [63:0] _unary_T_11 = _unary_T_10 ? orcb : {57'h0, count}; // @[ALU.scala:139:23, :141:62, :143:45]
wire _unary_T_12 = _unary_T == 12'h6B8; // @[ALU.scala:143:{31,45}]
wire [63:0] _unary_T_13 = _unary_T_12 ? rev8 : _unary_T_11; // @[ALU.scala:142:41, :143:45]
wire _unary_T_14 = _unary_T == 12'h80; // @[ALU.scala:143:{31,45}]
wire [63:0] _unary_T_15 = _unary_T_14 ? {48'h0, _unary_T_1} : _unary_T_13; // @[ALU.scala:143:45, :146:22]
wire _unary_T_16 = _unary_T == 12'h604; // @[ALU.scala:143:{31,45}]
wire [63:0] _unary_T_17 = _unary_T_16 ? _unary_T_5 : _unary_T_15; // @[ALU.scala:143:45, :147:40]
wire _unary_T_18 = _unary_T == 12'h605; // @[ALU.scala:143:{31,45}]
wire [63:0] unary = _unary_T_18 ? _unary_T_9 : _unary_T_17; // @[ALU.scala:143:45, :148:42]
wire [63:0] maxmin_out = io_cmp_out_0 ? io_in2_0 : io_in1_0; // @[ALU.scala:83:7, :152:23]
wire _rot_shamt_T = ~io_dw_0; // @[ALU.scala:83:7, :130:32, :155:29]
wire [6:0] _rot_shamt_T_1 = _rot_shamt_T ? 7'h20 : 7'h40; // @[ALU.scala:155:{22,29}]
wire [7:0] _rot_shamt_T_2 = {1'h0, _rot_shamt_T_1} - {2'h0, shamt}; // @[package.scala:16:47]
wire [6:0] rot_shamt = _rot_shamt_T_2[6:0]; // @[ALU.scala:155:54]
wire [63:0] _rotin_T_4 = {32'h0, _rotin_T_3}; // @[ALU.scala:156:44]
wire [63:0] _rotin_T_6 = {_rotin_T_5, 32'h0}; // @[ALU.scala:156:44]
wire [63:0] _rotin_T_8 = _rotin_T_6 & 64'hFFFFFFFF00000000; // @[ALU.scala:156:44]
wire [63:0] _rotin_T_9 = _rotin_T_4 | _rotin_T_8; // @[ALU.scala:156:44]
wire [47:0] _rotin_T_13 = _rotin_T_9[63:16]; // @[ALU.scala:156:44]
wire [63:0] _rotin_T_14 = {16'h0, _rotin_T_13 & 48'hFFFF0000FFFF}; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [47:0] _rotin_T_15 = _rotin_T_9[47:0]; // @[ALU.scala:156:44]
wire [63:0] _rotin_T_16 = {_rotin_T_15, 16'h0}; // @[ALU.scala:106:46, :156:44]
wire [63:0] _rotin_T_18 = _rotin_T_16 & 64'hFFFF0000FFFF0000; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotin_T_19 = _rotin_T_14 | _rotin_T_18; // @[ALU.scala:156:44]
wire [55:0] _rotin_T_23 = _rotin_T_19[63:8]; // @[ALU.scala:156:44]
wire [63:0] _rotin_T_24 = {8'h0, _rotin_T_23 & 56'hFF00FF00FF00FF}; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [55:0] _rotin_T_25 = _rotin_T_19[55:0]; // @[ALU.scala:156:44]
wire [63:0] _rotin_T_26 = {_rotin_T_25, 8'h0}; // @[ALU.scala:156:44]
wire [63:0] _rotin_T_28 = _rotin_T_26 & 64'hFF00FF00FF00FF00; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotin_T_29 = _rotin_T_24 | _rotin_T_28; // @[ALU.scala:156:44]
wire [59:0] _rotin_T_33 = _rotin_T_29[63:4]; // @[ALU.scala:156:44]
wire [63:0] _rotin_T_34 = {4'h0, _rotin_T_33 & 60'hF0F0F0F0F0F0F0F}; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [59:0] _rotin_T_35 = _rotin_T_29[59:0]; // @[ALU.scala:156:44]
wire [63:0] _rotin_T_36 = {_rotin_T_35, 4'h0}; // @[ALU.scala:106:46, :156:44]
wire [63:0] _rotin_T_38 = _rotin_T_36 & 64'hF0F0F0F0F0F0F0F0; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotin_T_39 = _rotin_T_34 | _rotin_T_38; // @[ALU.scala:156:44]
wire [61:0] _rotin_T_43 = _rotin_T_39[63:2]; // @[ALU.scala:156:44]
wire [63:0] _rotin_T_44 = {2'h0, _rotin_T_43 & 62'h3333333333333333}; // @[package.scala:16:47]
wire [61:0] _rotin_T_45 = _rotin_T_39[61:0]; // @[ALU.scala:156:44]
wire [63:0] _rotin_T_46 = {_rotin_T_45, 2'h0}; // @[package.scala:16:47]
wire [63:0] _rotin_T_48 = _rotin_T_46 & 64'hCCCCCCCCCCCCCCCC; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotin_T_49 = _rotin_T_44 | _rotin_T_48; // @[ALU.scala:156:44]
wire [62:0] _rotin_T_53 = _rotin_T_49[63:1]; // @[ALU.scala:156:44]
wire [63:0] _rotin_T_54 = {1'h0, _rotin_T_53 & 63'h5555555555555555}; // @[ALU.scala:88:26, :106:46, :108:24, :132:19, :156:44, :158:25]
wire [62:0] _rotin_T_55 = _rotin_T_49[62:0]; // @[ALU.scala:156:44]
wire [63:0] _rotin_T_56 = {_rotin_T_55, 1'h0}; // @[ALU.scala:88:26, :156:44]
wire [63:0] _rotin_T_58 = _rotin_T_56 & 64'hAAAAAAAAAAAAAAAA; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotin_T_59 = _rotin_T_54 | _rotin_T_58; // @[ALU.scala:156:44]
wire [63:0] rotin = _rotin_T ? shin_r : _rotin_T_59; // @[ALU.scala:104:18, :156:{18,24,44}]
wire [63:0] _rotout_r_T = rotin >> rot_shamt; // @[ALU.scala:155:54, :156:18, :157:25]
wire [63:0] rotout_r = _rotout_r_T; // @[ALU.scala:157:{25,38}]
wire [31:0] _rotout_l_T_2 = rotout_r[63:32]; // @[ALU.scala:157:38, :158:25]
wire [63:0] _rotout_l_T_3 = {32'h0, _rotout_l_T_2}; // @[ALU.scala:158:25]
wire [31:0] _rotout_l_T_4 = rotout_r[31:0]; // @[ALU.scala:157:38, :158:25]
wire [63:0] _rotout_l_T_5 = {_rotout_l_T_4, 32'h0}; // @[ALU.scala:158:25]
wire [63:0] _rotout_l_T_7 = _rotout_l_T_5 & 64'hFFFFFFFF00000000; // @[ALU.scala:158:25]
wire [63:0] _rotout_l_T_8 = _rotout_l_T_3 | _rotout_l_T_7; // @[ALU.scala:158:25]
wire [47:0] _rotout_l_T_12 = _rotout_l_T_8[63:16]; // @[ALU.scala:158:25]
wire [63:0] _rotout_l_T_13 = {16'h0, _rotout_l_T_12 & 48'hFFFF0000FFFF}; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [47:0] _rotout_l_T_14 = _rotout_l_T_8[47:0]; // @[ALU.scala:158:25]
wire [63:0] _rotout_l_T_15 = {_rotout_l_T_14, 16'h0}; // @[ALU.scala:106:46, :158:25]
wire [63:0] _rotout_l_T_17 = _rotout_l_T_15 & 64'hFFFF0000FFFF0000; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotout_l_T_18 = _rotout_l_T_13 | _rotout_l_T_17; // @[ALU.scala:158:25]
wire [55:0] _rotout_l_T_22 = _rotout_l_T_18[63:8]; // @[ALU.scala:158:25]
wire [63:0] _rotout_l_T_23 = {8'h0, _rotout_l_T_22 & 56'hFF00FF00FF00FF}; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [55:0] _rotout_l_T_24 = _rotout_l_T_18[55:0]; // @[ALU.scala:158:25]
wire [63:0] _rotout_l_T_25 = {_rotout_l_T_24, 8'h0}; // @[ALU.scala:158:25]
wire [63:0] _rotout_l_T_27 = _rotout_l_T_25 & 64'hFF00FF00FF00FF00; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotout_l_T_28 = _rotout_l_T_23 | _rotout_l_T_27; // @[ALU.scala:158:25]
wire [59:0] _rotout_l_T_32 = _rotout_l_T_28[63:4]; // @[ALU.scala:158:25]
wire [63:0] _rotout_l_T_33 = {4'h0, _rotout_l_T_32 & 60'hF0F0F0F0F0F0F0F}; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [59:0] _rotout_l_T_34 = _rotout_l_T_28[59:0]; // @[ALU.scala:158:25]
wire [63:0] _rotout_l_T_35 = {_rotout_l_T_34, 4'h0}; // @[ALU.scala:106:46, :158:25]
wire [63:0] _rotout_l_T_37 = _rotout_l_T_35 & 64'hF0F0F0F0F0F0F0F0; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotout_l_T_38 = _rotout_l_T_33 | _rotout_l_T_37; // @[ALU.scala:158:25]
wire [61:0] _rotout_l_T_42 = _rotout_l_T_38[63:2]; // @[ALU.scala:158:25]
wire [63:0] _rotout_l_T_43 = {2'h0, _rotout_l_T_42 & 62'h3333333333333333}; // @[package.scala:16:47]
wire [61:0] _rotout_l_T_44 = _rotout_l_T_38[61:0]; // @[ALU.scala:158:25]
wire [63:0] _rotout_l_T_45 = {_rotout_l_T_44, 2'h0}; // @[package.scala:16:47]
wire [63:0] _rotout_l_T_47 = _rotout_l_T_45 & 64'hCCCCCCCCCCCCCCCC; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] _rotout_l_T_48 = _rotout_l_T_43 | _rotout_l_T_47; // @[ALU.scala:158:25]
wire [62:0] _rotout_l_T_52 = _rotout_l_T_48[63:1]; // @[ALU.scala:158:25]
wire [63:0] _rotout_l_T_53 = {1'h0, _rotout_l_T_52 & 63'h5555555555555555}; // @[ALU.scala:88:26, :106:46, :108:24, :132:19, :156:44, :158:25]
wire [62:0] _rotout_l_T_54 = _rotout_l_T_48[62:0]; // @[ALU.scala:158:25]
wire [63:0] _rotout_l_T_55 = {_rotout_l_T_54, 1'h0}; // @[ALU.scala:88:26, :158:25]
wire [63:0] _rotout_l_T_57 = _rotout_l_T_55 & 64'hAAAAAAAAAAAAAAAA; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25]
wire [63:0] rotout_l = _rotout_l_T_53 | _rotout_l_T_57; // @[ALU.scala:158:25]
wire [63:0] _rotout_T_1 = _rotout_T ? rotout_r : rotout_l; // @[ALU.scala:157:38, :158:25, :159:{19,25}]
wire [63:0] _rotout_T_3 = _rotout_T_2 ? shout_l : shout_r; // @[ALU.scala:107:73, :108:24, :159:{55,61}]
wire [63:0] rotout = _rotout_T_1 | _rotout_T_3; // @[ALU.scala:159:{19,50,55}]
wire _out_T = io_fn_0 == 5'h0; // @[ALU.scala:83:7, :161:47]
wire [63:0] _out_T_1 = _out_T ? io_adder_out_0 : shift_logic; // @[ALU.scala:83:7, :123:52, :161:47]
wire _out_T_2 = io_fn_0 == 5'hA; // @[ALU.scala:83:7, :161:47]
wire [63:0] _out_T_3 = _out_T_2 ? io_adder_out_0 : _out_T_1; // @[ALU.scala:83:7, :161:47]
wire _out_T_4 = io_fn_0 == 5'h10; // @[ALU.scala:83:7, :161:47]
wire [63:0] _out_T_5 = _out_T_4 ? unary : _out_T_3; // @[ALU.scala:143:45, :161:47]
wire _out_T_6 = io_fn_0 == 5'h1C; // @[ALU.scala:83:7, :161:47]
wire [63:0] _out_T_7 = _out_T_6 ? maxmin_out : _out_T_5; // @[ALU.scala:152:23, :161:47]
wire _out_T_8 = io_fn_0 == 5'h1D; // @[ALU.scala:83:7, :161:47]
wire [63:0] _out_T_9 = _out_T_8 ? maxmin_out : _out_T_7; // @[ALU.scala:152:23, :161:47]
wire _out_T_10 = io_fn_0 == 5'h1E; // @[ALU.scala:83:7, :161:47]
wire [63:0] _out_T_11 = _out_T_10 ? maxmin_out : _out_T_9; // @[ALU.scala:152:23, :161:47]
wire _out_T_12 = &io_fn_0; // @[ALU.scala:83:7, :161:47]
wire [63:0] _out_T_13 = _out_T_12 ? maxmin_out : _out_T_11; // @[ALU.scala:152:23, :161:47]
wire _out_T_14 = io_fn_0 == 5'h11; // @[ALU.scala:83:7, :161:47]
wire [63:0] _out_T_15 = _out_T_14 ? rotout : _out_T_13; // @[ALU.scala:159:50, :161:47]
wire [63:0] out = _out_T_16 ? rotout : _out_T_15; // @[ALU.scala:159:50, :161:47]
wire _io_out_T = out[31]; // @[ALU.scala:161:47, :178:56]
wire [31:0] _io_out_T_1 = {32{_io_out_T}}; // @[ALU.scala:178:{48,56}]
wire [31:0] _io_out_T_2 = out[31:0]; // @[ALU.scala:161:47, :178:66]
wire [63:0] _io_out_T_3 = {_io_out_T_1, _io_out_T_2}; // @[ALU.scala:178:{43,48,66}]
assign io_out_0 = io_dw_0 ? out : _io_out_T_3; // @[ALU.scala:83:7, :161:47, :175:10, :178:{28,37,43}]
assign io_out = io_out_0; // @[ALU.scala:83:7]
assign io_adder_out = io_adder_out_0; // @[ALU.scala:83:7]
assign io_cmp_out = io_cmp_out_0; // @[ALU.scala:83:7]
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
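// Illustrative uses of the Seq[Bool] ops above (values shown as they would elaborate):
//   Seq(true.B, false.B) | Seq(false.B, true.B)  // Seq(true.B, true.B)
//   Seq(true.B, false.B) << 1                    // Seq(false.B, true.B, false.B)
//   Seq(true.B, false.B, true.B).orR             // true.B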
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
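// Illustrative, following the two conversions above:
//   "RocketTile".underscore  // "rocket_tile"
//   "Rocket Tile".kebab      // "rocket-tile"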
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
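// Illustrative: "b101".U(3.W).sextTo(6) gives "b111101".U, while "b101".U(3.W).padTo(6) gives "b000101".U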
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
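// Illustrative: with a 3-bit signed amount, x << 2.S(3.W) shifts left by 2,
// while x << (-1).S(3.W) shifts right by 1 (the sign bit selects the compensating >> (1 << w))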
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
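// Illustrative: "b0011".U(4.W).rotateRight(1) gives "b1001".U and "b0011".U(4.W).rotateLeft(1) gives "b0110".U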
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
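// Illustrative: 6.U.addWrap(5.U, 8) gives 3.U (a power-of-two modulus reduces to truncation),
// and 4.U.subWrap(5.U, 6) gives 5.U (a borrow is repaired by adding n back)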
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
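// Illustrative: OH1 is a thermometer code of trailing ones, e.g. UIntToOH1(3.U, 8) gives "b00000111".U
// and OH1ToUInt("b00000111".U) recovers 3.U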
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
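// Illustrative: leftOR("b00100".U(5.W)) gives "b11100".U (set bits propagate toward the MSB),
// while rightOR("b00100".U(5.W)) gives "b00111".U (propagation toward the LSB)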
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
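// Illustrative: groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2) gives Seq(1 -> List(1, 3), 0 -> List(2, 4)),
// with keys ordered by first appearance rather than by hash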
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and any unconnected [[Dangle]]s from this module
    * and its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
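  // In practice a subclass (or the surrounding clock-domain logic) usually re-drives childClock/childReset
  // from its chosen clock domain; the assignments above are only safe defaults so that undriven children do
  // not end up with dangling clock or reset wires.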
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
File Arbiter.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
object TLArbiter
{
  // (width, valids, select) => readys
type Policy = (Integer, UInt, Bool) => UInt
val lowestIndexFirst: Policy = (width, valids, select) => ~(leftOR(valids) << 1)(width-1, 0)
val highestIndexFirst: Policy = (width, valids, select) => ~((rightOR(valids) >> 1).pad(width))
val roundRobin: Policy = (width, valids, select) => if (width == 1) 1.U(1.W) else {
val valid = valids(width-1, 0)
assert (valid === valids)
val mask = RegInit(((BigInt(1) << width)-1).U(width-1,0))
val filter = Cat(valid & ~mask, valid)
val unready = (rightOR(filter, width*2, width) >> 1) | (mask << width)
val readys = ~((unready >> width) & unready(width-1, 0))
when (select && valid.orR) {
mask := leftOR(readys & valid, width)
}
readys(width-1, 0)
}
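  // Worked example (illustrative values, not from the original source): with width = 4 and
  // valids = b0110, lowestIndexFirst yields readys = b0011, so the winner (readys & valids)
  // is index 1, the lowest valid source; highestIndexFirst yields readys = b1100, so the
  // winner is index 2, the highest valid source. roundRobin additionally remembers the last
  // grant in `mask`, so repeated ties rotate between requesters across calls with `select` asserted.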
def lowestFromSeq[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: Seq[DecoupledIO[T]]): Unit = {
apply(lowestIndexFirst)(sink, sources.map(s => (edge.numBeats1(s.bits), s)):_*)
}
def lowest[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = {
apply(lowestIndexFirst)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*)
}
def highest[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = {
apply(highestIndexFirst)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*)
}
def robin[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = {
apply(roundRobin)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*)
}
def apply[T <: Data](policy: Policy)(sink: DecoupledIO[T], sources: (UInt, DecoupledIO[T])*): Unit = {
if (sources.isEmpty) {
sink.bits := DontCare
} else if (sources.size == 1) {
sink :<>= sources.head._2
} else {
val pairs = sources.toList
val beatsIn = pairs.map(_._1)
val sourcesIn = pairs.map(_._2)
// The number of beats which remain to be sent
val beatsLeft = RegInit(0.U)
val idle = beatsLeft === 0.U
val latch = idle && sink.ready // winner (if any) claims sink
// Who wants access to the sink?
val valids = sourcesIn.map(_.valid)
// Arbitrate amongst the requests
val readys = VecInit(policy(valids.size, Cat(valids.reverse), latch).asBools)
// Which request wins arbitration?
val winner = VecInit((readys zip valids) map { case (r,v) => r&&v })
// Confirm the policy works properly
require (readys.size == valids.size)
// Never two winners
val prefixOR = winner.scanLeft(false.B)(_||_).init
assert((prefixOR zip winner) map { case (p,w) => !p || !w } reduce {_ && _})
// If there was any request, there is a winner
assert (!valids.reduce(_||_) || winner.reduce(_||_))
// Track remaining beats
val maskedBeats = (winner zip beatsIn) map { case (w,b) => Mux(w, b, 0.U) }
val initBeats = maskedBeats.reduce(_ | _) // no winner => 0 beats
beatsLeft := Mux(latch, initBeats, beatsLeft - sink.fire)
// The one-hot source granted access in the previous cycle
val state = RegInit(VecInit(Seq.fill(sources.size)(false.B)))
val muxState = Mux(idle, winner, state)
state := muxState
val allowed = Mux(idle, readys, state)
(sourcesIn zip allowed) foreach { case (s, r) =>
s.ready := sink.ready && r
}
sink.valid := Mux(idle, valids.reduce(_||_), Mux1H(state, valids))
sink.bits :<= Mux1H(muxState, sourcesIn.map(_.bits))
}
}
}
// Synthesizable unit tests
import freechips.rocketchip.unittest._
abstract class DecoupledArbiterTest(
policy: TLArbiter.Policy,
txns: Int,
timeout: Int,
val numSources: Int,
beatsLeftFromIdx: Int => UInt)
(implicit p: Parameters) extends UnitTest(timeout)
{
val sources = Wire(Vec(numSources, DecoupledIO(UInt(log2Ceil(numSources).W))))
dontTouch(sources.suggestName("sources"))
val sink = Wire(DecoupledIO(UInt(log2Ceil(numSources).W)))
dontTouch(sink.suggestName("sink"))
val count = RegInit(0.U(log2Ceil(txns).W))
val lfsr = LFSR(16, true.B)
sources.zipWithIndex.map { case (z, i) => z.bits := i.U }
TLArbiter(policy)(sink, sources.zipWithIndex.map {
case (z, i) => (beatsLeftFromIdx(i), z)
}:_*)
count := count + 1.U
io.finished := count >= txns.U
}
/** This tests that when a specific pattern of source valids are driven,
* a new index from amongst that pattern is always selected,
* unless one of those sources takes multiple beats,
* in which case the same index should be selected until the arbiter goes idle.
*/
class TLDecoupledArbiterRobinTest(txns: Int = 128, timeout: Int = 500000, print: Boolean = false)
(implicit p: Parameters)
extends DecoupledArbiterTest(TLArbiter.roundRobin, txns, timeout, 6, i => i.U)
{
val lastWinner = RegInit((numSources+1).U)
val beatsLeft = RegInit(0.U(log2Ceil(numSources).W))
val first = lastWinner > numSources.U
val valid = lfsr(0)
val ready = lfsr(15)
sink.ready := ready
sources.zipWithIndex.map { // pattern: every even-indexed valid is driven the same random way
case (s, i) => s.valid := (if (i % 2 == 1) false.B else valid)
}
when (sink.fire) {
if (print) { printf("TestRobin: %d\n", sink.bits) }
when (beatsLeft === 0.U) {
assert(lastWinner =/= sink.bits, "Round robin did not pick a new idx despite one being valid.")
lastWinner := sink.bits
beatsLeft := sink.bits
} .otherwise {
assert(lastWinner === sink.bits, "Round robin did not pick the same index over multiple beats")
beatsLeft := beatsLeft - 1.U
}
}
if (print) {
when (!sink.fire) { printf("TestRobin: idle (%d %d)\n", valid, ready) }
}
}
/** This tests that the lowest index is always selected across random single cycle transactions. */
class TLDecoupledArbiterLowestTest(txns: Int = 128, timeout: Int = 500000)(implicit p: Parameters)
extends DecoupledArbiterTest(TLArbiter.lowestIndexFirst, txns, timeout, 15, _ => 0.U)
{
def assertLowest(id: Int): Unit = {
when (sources(id).valid) {
assert((numSources-1 until id by -1).map(!sources(_).fire).foldLeft(true.B)(_&&_), s"$id was valid but a higher valid source was granted ready.")
}
}
sources.zipWithIndex.map { case (s, i) => s.valid := lfsr(i) }
sink.ready := lfsr(15)
when (sink.fire) { (0 until numSources).foreach(assertLowest(_)) }
}
/** This tests that the highest index is always selected across random single cycle transactions. */
class TLDecoupledArbiterHighestTest(txns: Int = 128, timeout: Int = 500000)(implicit p: Parameters)
extends DecoupledArbiterTest(TLArbiter.highestIndexFirst, txns, timeout, 15, _ => 0.U)
{
def assertHighest(id: Int): Unit = {
when (sources(id).valid) {
assert((0 until id).map(!sources(_).fire).foldLeft(true.B)(_&&_), s"$id was valid but a lower valid source was granted ready.")
}
}
sources.zipWithIndex.map { case (s, i) => s.valid := lfsr(i) }
sink.ready := lfsr(15)
when (sink.fire) { (0 until numSources).foreach(assertHighest(_)) }
}
File Xbar.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressDecoder, AddressSet, RegionType, IdRange, TriStateValue}
import freechips.rocketchip.util.BundleField
// Trades off slave port proximity against routing resource cost
object ForceFanout
{
def apply[T](
a: TriStateValue = TriStateValue.unset,
b: TriStateValue = TriStateValue.unset,
c: TriStateValue = TriStateValue.unset,
d: TriStateValue = TriStateValue.unset,
e: TriStateValue = TriStateValue.unset)(body: Parameters => T)(implicit p: Parameters) =
{
body(p.alterPartial {
case ForceFanoutKey => p(ForceFanoutKey) match {
case ForceFanoutParams(pa, pb, pc, pd, pe) =>
ForceFanoutParams(a.update(pa), b.update(pb), c.update(pc), d.update(pd), e.update(pe))
}
})
}
}
private case class ForceFanoutParams(a: Boolean, b: Boolean, c: Boolean, d: Boolean, e: Boolean)
private case object ForceFanoutKey extends Field(ForceFanoutParams(false, false, false, false, false))
class TLXbar(policy: TLArbiter.Policy = TLArbiter.roundRobin, nameSuffix: Option[String] = None)(implicit p: Parameters) extends LazyModule
{
val node = new TLNexusNode(
clientFn = { seq =>
seq(0).v1copy(
echoFields = BundleField.union(seq.flatMap(_.echoFields)),
requestFields = BundleField.union(seq.flatMap(_.requestFields)),
responseKeys = seq.flatMap(_.responseKeys).distinct,
minLatency = seq.map(_.minLatency).min,
clients = (TLXbar.mapInputIds(seq) zip seq) flatMap { case (range, port) =>
port.clients map { client => client.v1copy(
sourceId = client.sourceId.shift(range.start)
)}
}
)
},
managerFn = { seq =>
val fifoIdFactory = TLXbar.relabeler()
seq(0).v1copy(
responseFields = BundleField.union(seq.flatMap(_.responseFields)),
requestKeys = seq.flatMap(_.requestKeys).distinct,
minLatency = seq.map(_.minLatency).min,
endSinkId = TLXbar.mapOutputIds(seq).map(_.end).max,
managers = seq.flatMap { port =>
require (port.beatBytes == seq(0).beatBytes,
s"Xbar ($name with parent $parent) data widths don't match: ${port.managers.map(_.name)} has ${port.beatBytes}B vs ${seq(0).managers.map(_.name)} has ${seq(0).beatBytes}B")
val fifoIdMapper = fifoIdFactory()
port.managers map { manager => manager.v1copy(
fifoId = manager.fifoId.map(fifoIdMapper(_))
)}
}
)
}
){
override def circuitIdentity = outputs.size == 1 && inputs.size == 1
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
if ((node.in.size * node.out.size) > (8*32)) {
println (s"!!! WARNING !!!")
println (s" Your TLXbar ($name with parent $parent) is very large, with ${node.in.size} Masters and ${node.out.size} Slaves.")
println (s"!!! WARNING !!!")
}
val wide_bundle = TLBundleParameters.union((node.in ++ node.out).map(_._2.bundle))
override def desiredName = (Seq("TLXbar") ++ nameSuffix ++ Seq(s"i${node.in.size}_o${node.out.size}_${wide_bundle.shortName}")).mkString("_")
TLXbar.circuit(policy, node.in, node.out)
}
}
object TLXbar
{
def mapInputIds(ports: Seq[TLMasterPortParameters]) = assignRanges(ports.map(_.endSourceId))
def mapOutputIds(ports: Seq[TLSlavePortParameters]) = assignRanges(ports.map(_.endSinkId))
def assignRanges(sizes: Seq[Int]) = {
val pow2Sizes = sizes.map { z => if (z == 0) 0 else 1 << log2Ceil(z) }
val tuples = pow2Sizes.zipWithIndex.sortBy(_._1) // record old index, then sort by increasing size
val starts = tuples.scanRight(0)(_._1 + _).tail // suffix-sum of the sizes = the start positions
val ranges = (tuples zip starts) map { case ((sz, i), st) =>
(if (sz == 0) IdRange(0, 0) else IdRange(st, st + sz), i)
}
    ranges.sortBy(_._2).map(_._1) // Restore original order
}
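  // Worked example (illustrative): assignRanges(Seq(3, 0, 5)) first rounds the sizes up to powers of
  // two (4, 0, 8), allocates the largest blocks at the lowest naturally aligned offsets, and then
  // restores the original port order, giving Seq(IdRange(8, 12), IdRange(0, 0), IdRange(0, 8)).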
def relabeler() = {
var idFactory = 0
() => {
val fifoMap = scala.collection.mutable.HashMap.empty[Int, Int]
(x: Int) => {
if (fifoMap.contains(x)) fifoMap(x) else {
val out = idFactory
idFactory = idFactory + 1
fifoMap += (x -> out)
out
}
}
}
}
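  // Worked example (illustrative): a single relabeler() shares one counter across the per-port
  // mappers it hands out, so FIFO domains stay globally unique after the Xbar merges ports:
  //   val fresh = relabeler(); val m0 = fresh(); val m1 = fresh()
  //   m0(7) == 0; m0(7) == 0; m0(3) == 1; m1(7) == 2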
  def circuit(policy: TLArbiter.Policy, seqIn: Seq[(TLBundle, TLEdge)], seqOut: Seq[(TLBundle, TLEdge)]): Unit = {
val (io_in, edgesIn) = seqIn.unzip
val (io_out, edgesOut) = seqOut.unzip
    // Not every master needs to connect to every slave on every channel; determine which connections are necessary
val reachableIO = edgesIn.map { cp => edgesOut.map { mp =>
cp.client.clients.exists { c => mp.manager.managers.exists { m =>
c.visibility.exists { ca => m.address.exists { ma =>
ca.overlaps(ma)}}}}
}.toVector}.toVector
val probeIO = (edgesIn zip reachableIO).map { case (cp, reachableO) =>
(edgesOut zip reachableO).map { case (mp, reachable) =>
reachable && cp.client.anySupportProbe && mp.manager.managers.exists(_.regionType >= RegionType.TRACKED)
}.toVector}.toVector
val releaseIO = (edgesIn zip reachableIO).map { case (cp, reachableO) =>
(edgesOut zip reachableO).map { case (mp, reachable) =>
reachable && cp.client.anySupportProbe && mp.manager.anySupportAcquireB
}.toVector}.toVector
val connectAIO = reachableIO
val connectBIO = probeIO
val connectCIO = releaseIO
val connectDIO = reachableIO
val connectEIO = releaseIO
def transpose[T](x: Seq[Seq[T]]) = if (x.isEmpty) Nil else Vector.tabulate(x(0).size) { i => Vector.tabulate(x.size) { j => x(j)(i) } }
val connectAOI = transpose(connectAIO)
val connectBOI = transpose(connectBIO)
val connectCOI = transpose(connectCIO)
val connectDOI = transpose(connectDIO)
val connectEOI = transpose(connectEIO)
// Grab the port ID mapping
val inputIdRanges = TLXbar.mapInputIds(edgesIn.map(_.client))
val outputIdRanges = TLXbar.mapOutputIds(edgesOut.map(_.manager))
    // We need an intermediate bundle type with the widest possible identifiers
val wide_bundle = TLBundleParameters.union(io_in.map(_.params) ++ io_out.map(_.params))
// Handle size = 1 gracefully (Chisel3 empty range is broken)
def trim(id: UInt, size: Int): UInt = if (size <= 1) 0.U else id(log2Ceil(size)-1, 0)
// Transform input bundle sources (sinks use global namespace on both sides)
val in = Wire(Vec(io_in.size, TLBundle(wide_bundle)))
for (i <- 0 until in.size) {
val r = inputIdRanges(i)
if (connectAIO(i).exists(x=>x)) {
in(i).a.bits.user := DontCare
in(i).a.squeezeAll.waiveAll :<>= io_in(i).a.squeezeAll.waiveAll
in(i).a.bits.source := io_in(i).a.bits.source | r.start.U
} else {
in(i).a := DontCare
io_in(i).a := DontCare
in(i).a.valid := false.B
io_in(i).a.ready := true.B
}
if (connectBIO(i).exists(x=>x)) {
io_in(i).b.squeezeAll :<>= in(i).b.squeezeAll
io_in(i).b.bits.source := trim(in(i).b.bits.source, r.size)
} else {
in(i).b := DontCare
io_in(i).b := DontCare
in(i).b.ready := true.B
io_in(i).b.valid := false.B
}
if (connectCIO(i).exists(x=>x)) {
in(i).c.bits.user := DontCare
in(i).c.squeezeAll.waiveAll :<>= io_in(i).c.squeezeAll.waiveAll
in(i).c.bits.source := io_in(i).c.bits.source | r.start.U
} else {
in(i).c := DontCare
io_in(i).c := DontCare
in(i).c.valid := false.B
io_in(i).c.ready := true.B
}
if (connectDIO(i).exists(x=>x)) {
io_in(i).d.squeezeAll.waiveAll :<>= in(i).d.squeezeAll.waiveAll
io_in(i).d.bits.source := trim(in(i).d.bits.source, r.size)
} else {
in(i).d := DontCare
io_in(i).d := DontCare
in(i).d.ready := true.B
io_in(i).d.valid := false.B
}
if (connectEIO(i).exists(x=>x)) {
in(i).e.squeezeAll :<>= io_in(i).e.squeezeAll
} else {
in(i).e := DontCare
io_in(i).e := DontCare
in(i).e.valid := false.B
io_in(i).e.ready := true.B
}
}
// Transform output bundle sinks (sources use global namespace on both sides)
val out = Wire(Vec(io_out.size, TLBundle(wide_bundle)))
for (o <- 0 until out.size) {
val r = outputIdRanges(o)
if (connectAOI(o).exists(x=>x)) {
out(o).a.bits.user := DontCare
io_out(o).a.squeezeAll.waiveAll :<>= out(o).a.squeezeAll.waiveAll
} else {
out(o).a := DontCare
io_out(o).a := DontCare
out(o).a.ready := true.B
io_out(o).a.valid := false.B
}
if (connectBOI(o).exists(x=>x)) {
out(o).b.squeezeAll :<>= io_out(o).b.squeezeAll
} else {
out(o).b := DontCare
io_out(o).b := DontCare
out(o).b.valid := false.B
io_out(o).b.ready := true.B
}
if (connectCOI(o).exists(x=>x)) {
out(o).c.bits.user := DontCare
io_out(o).c.squeezeAll.waiveAll :<>= out(o).c.squeezeAll.waiveAll
} else {
out(o).c := DontCare
io_out(o).c := DontCare
out(o).c.ready := true.B
io_out(o).c.valid := false.B
}
if (connectDOI(o).exists(x=>x)) {
out(o).d.squeezeAll :<>= io_out(o).d.squeezeAll
out(o).d.bits.sink := io_out(o).d.bits.sink | r.start.U
} else {
out(o).d := DontCare
io_out(o).d := DontCare
out(o).d.valid := false.B
io_out(o).d.ready := true.B
}
if (connectEOI(o).exists(x=>x)) {
io_out(o).e.squeezeAll :<>= out(o).e.squeezeAll
io_out(o).e.bits.sink := trim(out(o).e.bits.sink, r.size)
} else {
out(o).e := DontCare
io_out(o).e := DontCare
out(o).e.ready := true.B
io_out(o).e.valid := false.B
}
}
// Filter a list to only those elements selected
def filter[T](data: Seq[T], mask: Seq[Boolean]) = (data zip mask).filter(_._2).map(_._1)
// Based on input=>output connectivity, create per-input minimal address decode circuits
val requiredAC = (connectAIO ++ connectCIO).distinct
val outputPortFns: Map[Vector[Boolean], Seq[UInt => Bool]] = requiredAC.map { connectO =>
val port_addrs = edgesOut.map(_.manager.managers.flatMap(_.address))
val routingMask = AddressDecoder(filter(port_addrs, connectO))
val route_addrs = port_addrs.map(seq => AddressSet.unify(seq.map(_.widen(~routingMask)).distinct))
// Print the address mapping
if (false) {
println("Xbar mapping:")
route_addrs.foreach { p =>
print(" ")
p.foreach { a => print(s" ${a}") }
println("")
}
println("--")
}
(connectO, route_addrs.map(seq => (addr: UInt) => seq.map(_.contains(addr)).reduce(_ || _)))
}.toMap
// Print the ID mapping
if (false) {
println(s"XBar mapping:")
(edgesIn zip inputIdRanges).zipWithIndex.foreach { case ((edge, id), i) =>
println(s"\t$i assigned ${id} for ${edge.client.clients.map(_.name).mkString(", ")}")
}
println("")
}
val addressA = (in zip edgesIn) map { case (i, e) => e.address(i.a.bits) }
val addressC = (in zip edgesIn) map { case (i, e) => e.address(i.c.bits) }
def unique(x: Vector[Boolean]): Bool = (x.filter(x=>x).size <= 1).B
val requestAIO = (connectAIO zip addressA) map { case (c, i) => outputPortFns(c).map { o => unique(c) || o(i) } }
val requestCIO = (connectCIO zip addressC) map { case (c, i) => outputPortFns(c).map { o => unique(c) || o(i) } }
val requestBOI = out.map { o => inputIdRanges.map { i => i.contains(o.b.bits.source) } }
val requestDOI = out.map { o => inputIdRanges.map { i => i.contains(o.d.bits.source) } }
val requestEIO = in.map { i => outputIdRanges.map { o => o.contains(i.e.bits.sink) } }
val beatsAI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.a.bits) }
val beatsBO = (out zip edgesOut) map { case (o, e) => e.numBeats1(o.b.bits) }
val beatsCI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.c.bits) }
val beatsDO = (out zip edgesOut) map { case (o, e) => e.numBeats1(o.d.bits) }
val beatsEI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.e.bits) }
// Fanout the input sources to the output sinks
val portsAOI = transpose((in zip requestAIO) map { case (i, r) => TLXbar.fanout(i.a, r, edgesOut.map(_.params(ForceFanoutKey).a)) })
val portsBIO = transpose((out zip requestBOI) map { case (o, r) => TLXbar.fanout(o.b, r, edgesIn .map(_.params(ForceFanoutKey).b)) })
val portsCOI = transpose((in zip requestCIO) map { case (i, r) => TLXbar.fanout(i.c, r, edgesOut.map(_.params(ForceFanoutKey).c)) })
val portsDIO = transpose((out zip requestDOI) map { case (o, r) => TLXbar.fanout(o.d, r, edgesIn .map(_.params(ForceFanoutKey).d)) })
val portsEOI = transpose((in zip requestEIO) map { case (i, r) => TLXbar.fanout(i.e, r, edgesOut.map(_.params(ForceFanoutKey).e)) })
// Arbitrate amongst the sources
for (o <- 0 until out.size) {
TLArbiter(policy)(out(o).a, filter(beatsAI zip portsAOI(o), connectAOI(o)):_*)
TLArbiter(policy)(out(o).c, filter(beatsCI zip portsCOI(o), connectCOI(o)):_*)
TLArbiter(policy)(out(o).e, filter(beatsEI zip portsEOI(o), connectEOI(o)):_*)
filter(portsAOI(o), connectAOI(o).map(!_)) foreach { r => r.ready := false.B }
filter(portsCOI(o), connectCOI(o).map(!_)) foreach { r => r.ready := false.B }
filter(portsEOI(o), connectEOI(o).map(!_)) foreach { r => r.ready := false.B }
}
for (i <- 0 until in.size) {
TLArbiter(policy)(in(i).b, filter(beatsBO zip portsBIO(i), connectBIO(i)):_*)
TLArbiter(policy)(in(i).d, filter(beatsDO zip portsDIO(i), connectDIO(i)):_*)
filter(portsBIO(i), connectBIO(i).map(!_)) foreach { r => r.ready := false.B }
filter(portsDIO(i), connectDIO(i).map(!_)) foreach { r => r.ready := false.B }
}
}
def apply(policy: TLArbiter.Policy = TLArbiter.roundRobin, nameSuffix: Option[String] = None)(implicit p: Parameters): TLNode =
{
val xbar = LazyModule(new TLXbar(policy, nameSuffix))
xbar.node
}
// Replicate an input port to each output port
def fanout[T <: TLChannel](input: DecoupledIO[T], select: Seq[Bool], force: Seq[Boolean] = Nil): Seq[DecoupledIO[T]] = {
val filtered = Wire(Vec(select.size, chiselTypeOf(input)))
for (i <- 0 until select.size) {
filtered(i).bits := (if (force.lift(i).getOrElse(false)) IdentityModule(input.bits) else input.bits)
filtered(i).valid := input.valid && (select(i) || (select.size == 1).B)
}
input.ready := Mux1H(select, filtered.map(_.ready))
filtered
}
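  // Note: each filtered copy shares the input's bits (optionally buffered through an IdentityModule
  // when ForceFanout is requested), only the selected copy sees valid, and the input's ready is the
  // Mux1H of the selected copy's ready, so exactly one output port can accept each beat.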
}
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class TLRAMXbar(nManagers: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val fuzz = LazyModule(new TLFuzzer(txns))
val model = LazyModule(new TLRAMModel("Xbar"))
val xbar = LazyModule(new TLXbar)
xbar.node := TLDelayer(0.1) := model.node := fuzz.node
(0 until nManagers) foreach { n =>
val ram = LazyModule(new TLRAM(AddressSet(0x0+0x400*n, 0x3ff)))
ram.node := TLFragmenter(4, 256) := TLDelayer(0.1) := xbar.node
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzz.module.io.finished
}
}
class TLRAMXbarTest(nManagers: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLRAMXbar(nManagers,txns)).module)
dut.io.start := io.start
io.finished := dut.io.finished
}
class TLMulticlientXbar(nManagers: Int, nClients: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val xbar = LazyModule(new TLXbar)
val fuzzers = (0 until nClients) map { n =>
val fuzz = LazyModule(new TLFuzzer(txns))
xbar.node := TLDelayer(0.1) := fuzz.node
fuzz
}
(0 until nManagers) foreach { n =>
val ram = LazyModule(new TLRAM(AddressSet(0x0+0x400*n, 0x3ff)))
ram.node := TLFragmenter(4, 256) := TLDelayer(0.1) := xbar.node
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzzers.last.module.io.finished
}
}
class TLMulticlientXbarTest(nManagers: Int, nClients: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLMulticlientXbar(nManagers, nClients, txns)).module)
dut.io.start := io.start
io.finished := dut.io.finished
}
| module TLXbar_sbus_i2_o2_a32d128s7k4z4c( // @[Xbar.scala:74:9]
input clock, // @[Xbar.scala:74:9]
input reset, // @[Xbar.scala:74:9]
output auto_anon_in_1_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_1_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_in_1_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_in_1_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_anon_in_1_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_anon_in_1_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_anon_in_1_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [15:0] auto_anon_in_1_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [127:0] auto_anon_in_1_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_1_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_1_b_ready, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_1_b_valid, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_anon_in_1_b_bits_param, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_anon_in_1_b_bits_address, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_1_c_ready, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_1_c_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_in_1_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_in_1_c_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_anon_in_1_c_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_anon_in_1_c_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_anon_in_1_c_bits_address, // @[LazyModuleImp.scala:107:25]
input [127:0] auto_anon_in_1_c_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_1_c_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_1_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_1_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_in_1_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_anon_in_1_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_anon_in_1_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_anon_in_1_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_anon_in_1_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_1_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [127:0] auto_anon_in_1_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_1_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_1_e_valid, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_anon_in_1_e_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_0_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_0_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_in_0_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_in_0_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_anon_in_0_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_anon_in_0_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_anon_in_0_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [15:0] auto_anon_in_0_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [127:0] auto_anon_in_0_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_0_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_0_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_0_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_in_0_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_anon_in_0_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_anon_in_0_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_anon_in_0_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_anon_in_0_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_0_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [127:0] auto_anon_in_0_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_0_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_1_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_1_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_out_1_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_out_1_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_out_1_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [6:0] auto_anon_out_1_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_anon_out_1_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [15:0] auto_anon_out_1_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [127:0] auto_anon_out_1_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_1_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_1_b_ready, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_1_b_valid, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_anon_out_1_b_bits_param, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_anon_out_1_b_bits_address, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_1_c_ready, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_1_c_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_out_1_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_out_1_c_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_out_1_c_bits_size, // @[LazyModuleImp.scala:107:25]
output [6:0] auto_anon_out_1_c_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_anon_out_1_c_bits_address, // @[LazyModuleImp.scala:107:25]
output [127:0] auto_anon_out_1_c_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_1_c_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_1_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_1_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_out_1_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_anon_out_1_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_out_1_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [6:0] auto_anon_out_1_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_anon_out_1_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_1_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [127:0] auto_anon_out_1_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_1_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_1_e_valid, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_anon_out_1_e_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_0_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_0_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_out_0_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_out_0_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_anon_out_0_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [6:0] auto_anon_out_0_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [28:0] auto_anon_out_0_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [15:0] auto_anon_out_0_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [127:0] auto_anon_out_0_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_0_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_0_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_0_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_out_0_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_anon_out_0_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_anon_out_0_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [6:0] auto_anon_out_0_d_bits_source, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_0_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_0_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [127:0] auto_anon_out_0_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_0_d_bits_corrupt // @[LazyModuleImp.scala:107:25]
);
wire allowed_3_1; // @[Arbiter.scala:92:24]
wire allowed_3_0; // @[Arbiter.scala:92:24]
wire allowed_2_1; // @[Arbiter.scala:92:24]
wire allowed_2_0; // @[Arbiter.scala:92:24]
wire allowed_1_1; // @[Arbiter.scala:92:24]
wire allowed_1_0; // @[Arbiter.scala:92:24]
wire allowed_1; // @[Arbiter.scala:92:24]
wire allowed_0; // @[Arbiter.scala:92:24]
wire [6:0] in_0_a_bits_source = {2'h2, auto_anon_in_0_a_bits_source}; // @[Xbar.scala:74:9, :166:55]
wire [6:0] in_1_a_bits_source = {1'h0, auto_anon_in_1_a_bits_source}; // @[Xbar.scala:74:9, :166:29]
wire [3:0] out_0_d_bits_sink = {3'h0, auto_anon_out_0_d_bits_sink}; // @[Xbar.scala:251:28]
wire [3:0] out_1_d_bits_size = {1'h0, auto_anon_out_1_d_bits_size}; // @[Xbar.scala:74:9, :250:29]
wire requestAIO_0_0 = {auto_anon_in_0_a_bits_address[31], auto_anon_in_0_a_bits_address[27:26]} == 3'h0 | {auto_anon_in_0_a_bits_address[31], auto_anon_in_0_a_bits_address[27:26], ~(auto_anon_in_0_a_bits_address[16]), auto_anon_in_0_a_bits_address[12]} == 5'h0 | {auto_anon_in_0_a_bits_address[31], ~(auto_anon_in_0_a_bits_address[27:26])} == 3'h0; // @[Xbar.scala:291:92]
wire requestAIO_0_1 = {auto_anon_in_0_a_bits_address[31], auto_anon_in_0_a_bits_address[27:26] ^ 2'h2, auto_anon_in_0_a_bits_address[16]} == 4'h0 | auto_anon_in_0_a_bits_address[31]; // @[Xbar.scala:74:9, :291:92]
wire requestAIO_1_0 = {auto_anon_in_1_a_bits_address[31], auto_anon_in_1_a_bits_address[27:26]} == 3'h0 | {auto_anon_in_1_a_bits_address[31], auto_anon_in_1_a_bits_address[27:26], ~(auto_anon_in_1_a_bits_address[16]), auto_anon_in_1_a_bits_address[12]} == 5'h0 | {auto_anon_in_1_a_bits_address[31], ~(auto_anon_in_1_a_bits_address[27:26])} == 3'h0; // @[Xbar.scala:291:92]
wire requestAIO_1_1 = {auto_anon_in_1_a_bits_address[31], auto_anon_in_1_a_bits_address[27:26] ^ 2'h2, auto_anon_in_1_a_bits_address[16]} == 4'h0 | auto_anon_in_1_a_bits_address[31]; // @[Xbar.scala:74:9, :291:92]
wire requestDOI_0_0 = auto_anon_out_0_d_bits_source[6:5] == 2'h2; // @[Xbar.scala:74:9]
wire requestDOI_1_0 = auto_anon_out_1_d_bits_source[6:5] == 2'h2; // @[Xbar.scala:74:9]
wire portsAOI_filtered_0_valid = auto_anon_in_0_a_valid & requestAIO_0_0; // @[Xbar.scala:291:92, :355:40]
wire portsAOI_filtered_1_valid = auto_anon_in_0_a_valid & requestAIO_0_1; // @[Xbar.scala:291:92, :355:40]
wire _portsAOI_in_0_a_ready_T_2 = requestAIO_0_0 & auto_anon_out_0_a_ready & allowed_0 | requestAIO_0_1 & auto_anon_out_1_a_ready & allowed_1_0; // @[Mux.scala:30:73]
wire portsAOI_filtered_1_0_valid = auto_anon_in_1_a_valid & requestAIO_1_0; // @[Xbar.scala:291:92, :355:40]
wire portsAOI_filtered_1_1_valid = auto_anon_in_1_a_valid & requestAIO_1_1; // @[Xbar.scala:291:92, :355:40]
wire _portsAOI_in_1_a_ready_T_2 = requestAIO_1_0 & auto_anon_out_0_a_ready & allowed_1 | requestAIO_1_1 & auto_anon_out_1_a_ready & allowed_1_1; // @[Mux.scala:30:73]
wire portsDIO_filtered_0_valid = auto_anon_out_0_d_valid & requestDOI_0_0; // @[Xbar.scala:355:40]
wire portsDIO_filtered_1_valid = auto_anon_out_0_d_valid & ~(auto_anon_out_0_d_bits_source[6]); // @[Xbar.scala:355:40]
wire portsDIO_filtered_1_0_valid = auto_anon_out_1_d_valid & requestDOI_1_0; // @[Xbar.scala:355:40]
wire portsDIO_filtered_1_1_valid = auto_anon_out_1_d_valid & ~(auto_anon_out_1_d_bits_source[6]); // @[Xbar.scala:355:40]
reg [7:0] beatsLeft; // @[Arbiter.scala:60:30]
wire idle = beatsLeft == 8'h0; // @[Arbiter.scala:60:30, :61:28]
wire [1:0] readys_valid = {portsAOI_filtered_1_0_valid, portsAOI_filtered_0_valid}; // @[Xbar.scala:355:40]
reg [1:0] readys_mask; // @[Arbiter.scala:23:23]
wire [1:0] _readys_filter_T_1 = readys_valid & ~readys_mask; // @[Arbiter.scala:23:23, :24:{28,30}, :68:51]
wire [1:0] readys_readys = ~({readys_mask[1], _readys_filter_T_1[1] | readys_mask[0]} & ({_readys_filter_T_1[0], portsAOI_filtered_1_0_valid} | _readys_filter_T_1)); // @[package.scala:262:43]
wire winner_0 = readys_readys[0] & portsAOI_filtered_0_valid; // @[Xbar.scala:355:40]
wire winner_1 = readys_readys[1] & portsAOI_filtered_1_0_valid; // @[Xbar.scala:355:40]
wire _out_0_a_valid_T = portsAOI_filtered_0_valid | portsAOI_filtered_1_0_valid; // @[Xbar.scala:355:40]
reg state_0; // @[Arbiter.scala:88:26]
reg state_1; // @[Arbiter.scala:88:26]
wire muxState_0 = idle ? winner_0 : state_0; // @[Arbiter.scala:61:28, :71:69, :88:26, :89:25]
wire muxState_1 = idle ? winner_1 : state_1; // @[Arbiter.scala:61:28, :71:69, :88:26, :89:25]
assign allowed_0 = idle ? readys_readys[0] : state_0; // @[Arbiter.scala:26:18, :61:28, :68:76, :88:26, :92:24]
assign allowed_1 = idle ? readys_readys[1] : state_1; // @[Arbiter.scala:26:18, :61:28, :68:76, :88:26, :92:24]
wire out_0_a_valid = idle ? _out_0_a_valid_T : state_0 & portsAOI_filtered_0_valid | state_1 & portsAOI_filtered_1_0_valid; // @[Mux.scala:30:73]
reg [7:0] beatsLeft_1; // @[Arbiter.scala:60:30]
wire idle_1 = beatsLeft_1 == 8'h0; // @[Arbiter.scala:60:30, :61:28]
wire [1:0] readys_valid_1 = {portsAOI_filtered_1_1_valid, portsAOI_filtered_1_valid}; // @[Xbar.scala:355:40]
reg [1:0] readys_mask_1; // @[Arbiter.scala:23:23]
wire [1:0] _readys_filter_T_3 = readys_valid_1 & ~readys_mask_1; // @[Arbiter.scala:23:23, :24:{28,30}, :68:51]
wire [1:0] readys_readys_1 = ~({readys_mask_1[1], _readys_filter_T_3[1] | readys_mask_1[0]} & ({_readys_filter_T_3[0], portsAOI_filtered_1_1_valid} | _readys_filter_T_3)); // @[package.scala:262:43]
wire winner_1_0 = readys_readys_1[0] & portsAOI_filtered_1_valid; // @[Xbar.scala:355:40]
wire winner_1_1 = readys_readys_1[1] & portsAOI_filtered_1_1_valid; // @[Xbar.scala:355:40]
wire _out_1_a_valid_T = portsAOI_filtered_1_valid | portsAOI_filtered_1_1_valid; // @[Xbar.scala:355:40]
reg state_1_0; // @[Arbiter.scala:88:26]
reg state_1_1; // @[Arbiter.scala:88:26]
wire muxState_1_0 = idle_1 ? winner_1_0 : state_1_0; // @[Arbiter.scala:61:28, :71:69, :88:26, :89:25]
wire muxState_1_1 = idle_1 ? winner_1_1 : state_1_1; // @[Arbiter.scala:61:28, :71:69, :88:26, :89:25]
assign allowed_1_0 = idle_1 ? readys_readys_1[0] : state_1_0; // @[Arbiter.scala:26:18, :61:28, :68:76, :88:26, :92:24]
assign allowed_1_1 = idle_1 ? readys_readys_1[1] : state_1_1; // @[Arbiter.scala:26:18, :61:28, :68:76, :88:26, :92:24]
wire out_1_a_valid = idle_1 ? _out_1_a_valid_T : state_1_0 & portsAOI_filtered_1_valid | state_1_1 & portsAOI_filtered_1_1_valid; // @[Mux.scala:30:73]
reg [7:0] beatsLeft_2; // @[Arbiter.scala:60:30]
wire idle_2 = beatsLeft_2 == 8'h0; // @[Arbiter.scala:60:30, :61:28]
wire [1:0] readys_valid_2 = {portsDIO_filtered_1_0_valid, portsDIO_filtered_0_valid}; // @[Xbar.scala:355:40]
reg [1:0] readys_mask_2; // @[Arbiter.scala:23:23]
wire [1:0] _readys_filter_T_5 = readys_valid_2 & ~readys_mask_2; // @[Arbiter.scala:23:23, :24:{28,30}, :68:51]
wire [1:0] readys_readys_2 = ~({readys_mask_2[1], _readys_filter_T_5[1] | readys_mask_2[0]} & ({_readys_filter_T_5[0], portsDIO_filtered_1_0_valid} | _readys_filter_T_5)); // @[package.scala:262:43]
wire winner_2_0 = readys_readys_2[0] & portsDIO_filtered_0_valid; // @[Xbar.scala:355:40]
wire winner_2_1 = readys_readys_2[1] & portsDIO_filtered_1_0_valid; // @[Xbar.scala:355:40]
wire _in_0_d_valid_T = portsDIO_filtered_0_valid | portsDIO_filtered_1_0_valid; // @[Xbar.scala:355:40]
reg state_2_0; // @[Arbiter.scala:88:26]
reg state_2_1; // @[Arbiter.scala:88:26]
wire muxState_2_0 = idle_2 ? winner_2_0 : state_2_0; // @[Arbiter.scala:61:28, :71:69, :88:26, :89:25]
wire muxState_2_1 = idle_2 ? winner_2_1 : state_2_1; // @[Arbiter.scala:61:28, :71:69, :88:26, :89:25]
assign allowed_2_0 = idle_2 ? readys_readys_2[0] : state_2_0; // @[Arbiter.scala:26:18, :61:28, :68:76, :88:26, :92:24]
assign allowed_2_1 = idle_2 ? readys_readys_2[1] : state_2_1; // @[Arbiter.scala:26:18, :61:28, :68:76, :88:26, :92:24]
wire in_0_d_valid = idle_2 ? _in_0_d_valid_T : state_2_0 & portsDIO_filtered_0_valid | state_2_1 & portsDIO_filtered_1_0_valid; // @[Mux.scala:30:73]
wire _in_0_d_bits_T_2 = muxState_2_0 & auto_anon_out_0_d_bits_corrupt | muxState_2_1 & auto_anon_out_1_d_bits_corrupt; // @[Mux.scala:30:73]
wire _in_0_d_bits_T_8 = muxState_2_0 & auto_anon_out_0_d_bits_denied | muxState_2_1 & auto_anon_out_1_d_bits_denied; // @[Mux.scala:30:73]
wire [3:0] _in_0_d_bits_T_11 = (muxState_2_0 ? out_0_d_bits_sink : 4'h0) | (muxState_2_1 ? auto_anon_out_1_d_bits_sink : 4'h0); // @[Mux.scala:30:73]
wire [4:0] _in_0_d_bits_T_14 = (muxState_2_0 ? auto_anon_out_0_d_bits_source[4:0] : 5'h0) | (muxState_2_1 ? auto_anon_out_1_d_bits_source[4:0] : 5'h0); // @[Mux.scala:30:73]
wire [3:0] _in_0_d_bits_T_17 = (muxState_2_0 ? auto_anon_out_0_d_bits_size : 4'h0) | (muxState_2_1 ? out_1_d_bits_size : 4'h0); // @[Mux.scala:30:73]
wire [1:0] _in_0_d_bits_T_20 = (muxState_2_0 ? auto_anon_out_0_d_bits_param : 2'h0) | (muxState_2_1 ? auto_anon_out_1_d_bits_param : 2'h0); // @[Mux.scala:30:73]
wire [2:0] _in_0_d_bits_T_23 = (muxState_2_0 ? auto_anon_out_0_d_bits_opcode : 3'h0) | (muxState_2_1 ? auto_anon_out_1_d_bits_opcode : 3'h0); // @[Mux.scala:30:73]
reg [7:0] beatsLeft_3; // @[Arbiter.scala:60:30]
wire idle_3 = beatsLeft_3 == 8'h0; // @[Arbiter.scala:60:30, :61:28]
wire [1:0] readys_valid_3 = {portsDIO_filtered_1_1_valid, portsDIO_filtered_1_valid}; // @[Xbar.scala:355:40]
reg [1:0] readys_mask_3; // @[Arbiter.scala:23:23]
wire [1:0] _readys_filter_T_7 = readys_valid_3 & ~readys_mask_3; // @[Arbiter.scala:23:23, :24:{28,30}, :68:51]
wire [1:0] readys_readys_3 = ~({readys_mask_3[1], _readys_filter_T_7[1] | readys_mask_3[0]} & ({_readys_filter_T_7[0], portsDIO_filtered_1_1_valid} | _readys_filter_T_7)); // @[package.scala:262:43]
wire winner_3_0 = readys_readys_3[0] & portsDIO_filtered_1_valid; // @[Xbar.scala:355:40]
wire winner_3_1 = readys_readys_3[1] & portsDIO_filtered_1_1_valid; // @[Xbar.scala:355:40]
wire _in_1_d_valid_T = portsDIO_filtered_1_valid | portsDIO_filtered_1_1_valid; // @[Xbar.scala:355:40] |
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
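    // For illustration: Seq(a, b, c, d).rotate(1) == Seq(b, c, d, a) and
    // Seq(a, b, c, d).rotateRight(1) == Seq(d, a, b, c); the UInt variants build the same
    // result out of log2Ceil(size) conditional single-stage rotations.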
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
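    // For illustration (assumed widths): 8.U << 2.S(3.W) gives 32.U, while 8.U << (-2).S(3.W)
    // selects the compensating right shift and gives 2.U; >> mirrors this for the other direction.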
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
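    // For illustration: with n = 8, 6.U.addWrap(5.U, 8) == 3.U and 2.U.subWrap(5.U, 8) == 5.U;
    // the power-of-two case simply truncates, while the general case conditionally adds or subtracts n.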
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
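  // For illustration: an "OH1" value is a low-order mask one less than a one-hot value, so
  // UIntToOH1(3.U, 8) == b00000111, OH1ToOH(b0111) == b1000, and OH1ToUInt(b0111) == 3.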
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
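  // For illustration: leftOR(b00100) == b11100 (smears the set bit upward) and
  // rightOR(b00100) == b00111 (smears it downward); the optional `cap` limits how far the fill
  // can propagate.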
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
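  // For illustration: groupByIntoSeq(Seq(1, 2, 3, 4, 5))(_ % 2) returns
  // Seq(1 -> Seq(1, 3, 5), 0 -> Seq(2, 4)), keeping keys in first-appearance order so generated
  // hardware does not depend on Map iteration order.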
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
| module OptimizationBarrier_TLBEntryData_39( // @[package.scala:267:30]
input clock, // @[package.scala:267:30]
input reset, // @[package.scala:267:30]
input [19:0] io_x_ppn, // @[package.scala:268:18]
input io_x_u, // @[package.scala:268:18]
input io_x_g, // @[package.scala:268:18]
input io_x_ae_ptw, // @[package.scala:268:18]
input io_x_ae_final, // @[package.scala:268:18]
input io_x_ae_stage2, // @[package.scala:268:18]
input io_x_pf, // @[package.scala:268:18]
input io_x_gf, // @[package.scala:268:18]
input io_x_sw, // @[package.scala:268:18]
input io_x_sx, // @[package.scala:268:18]
input io_x_sr, // @[package.scala:268:18]
input io_x_hw, // @[package.scala:268:18]
input io_x_hx, // @[package.scala:268:18]
input io_x_hr, // @[package.scala:268:18]
input io_x_pw, // @[package.scala:268:18]
input io_x_px, // @[package.scala:268:18]
input io_x_pr, // @[package.scala:268:18]
input io_x_ppp, // @[package.scala:268:18]
input io_x_pal, // @[package.scala:268:18]
input io_x_paa, // @[package.scala:268:18]
input io_x_eff, // @[package.scala:268:18]
input io_x_c, // @[package.scala:268:18]
input io_x_fragmented_superpage, // @[package.scala:268:18]
output [19:0] io_y_ppn, // @[package.scala:268:18]
output io_y_u, // @[package.scala:268:18]
output io_y_ae_ptw, // @[package.scala:268:18]
output io_y_ae_final, // @[package.scala:268:18]
output io_y_ae_stage2, // @[package.scala:268:18]
output io_y_pf, // @[package.scala:268:18]
output io_y_gf, // @[package.scala:268:18]
output io_y_sw, // @[package.scala:268:18]
output io_y_sx, // @[package.scala:268:18]
output io_y_sr, // @[package.scala:268:18]
output io_y_hw, // @[package.scala:268:18]
output io_y_hx, // @[package.scala:268:18]
output io_y_hr, // @[package.scala:268:18]
output io_y_pw, // @[package.scala:268:18]
output io_y_px, // @[package.scala:268:18]
output io_y_pr, // @[package.scala:268:18]
output io_y_ppp, // @[package.scala:268:18]
output io_y_pal, // @[package.scala:268:18]
output io_y_paa, // @[package.scala:268:18]
output io_y_eff, // @[package.scala:268:18]
output io_y_c // @[package.scala:268:18]
);
wire [19:0] io_x_ppn_0 = io_x_ppn; // @[package.scala:267:30]
wire io_x_u_0 = io_x_u; // @[package.scala:267:30]
wire io_x_g_0 = io_x_g; // @[package.scala:267:30]
wire io_x_ae_ptw_0 = io_x_ae_ptw; // @[package.scala:267:30]
wire io_x_ae_final_0 = io_x_ae_final; // @[package.scala:267:30]
wire io_x_ae_stage2_0 = io_x_ae_stage2; // @[package.scala:267:30]
wire io_x_pf_0 = io_x_pf; // @[package.scala:267:30]
wire io_x_gf_0 = io_x_gf; // @[package.scala:267:30]
wire io_x_sw_0 = io_x_sw; // @[package.scala:267:30]
wire io_x_sx_0 = io_x_sx; // @[package.scala:267:30]
wire io_x_sr_0 = io_x_sr; // @[package.scala:267:30]
wire io_x_hw_0 = io_x_hw; // @[package.scala:267:30]
wire io_x_hx_0 = io_x_hx; // @[package.scala:267:30]
wire io_x_hr_0 = io_x_hr; // @[package.scala:267:30]
wire io_x_pw_0 = io_x_pw; // @[package.scala:267:30]
wire io_x_px_0 = io_x_px; // @[package.scala:267:30]
wire io_x_pr_0 = io_x_pr; // @[package.scala:267:30]
wire io_x_ppp_0 = io_x_ppp; // @[package.scala:267:30]
wire io_x_pal_0 = io_x_pal; // @[package.scala:267:30]
wire io_x_paa_0 = io_x_paa; // @[package.scala:267:30]
wire io_x_eff_0 = io_x_eff; // @[package.scala:267:30]
wire io_x_c_0 = io_x_c; // @[package.scala:267:30]
wire io_x_fragmented_superpage_0 = io_x_fragmented_superpage; // @[package.scala:267:30]
wire [19:0] io_y_ppn_0 = io_x_ppn_0; // @[package.scala:267:30]
wire io_y_u_0 = io_x_u_0; // @[package.scala:267:30]
wire io_y_g = io_x_g_0; // @[package.scala:267:30]
wire io_y_ae_ptw_0 = io_x_ae_ptw_0; // @[package.scala:267:30]
wire io_y_ae_final_0 = io_x_ae_final_0; // @[package.scala:267:30]
wire io_y_ae_stage2_0 = io_x_ae_stage2_0; // @[package.scala:267:30]
wire io_y_pf_0 = io_x_pf_0; // @[package.scala:267:30]
wire io_y_gf_0 = io_x_gf_0; // @[package.scala:267:30]
wire io_y_sw_0 = io_x_sw_0; // @[package.scala:267:30]
wire io_y_sx_0 = io_x_sx_0; // @[package.scala:267:30]
wire io_y_sr_0 = io_x_sr_0; // @[package.scala:267:30]
wire io_y_hw_0 = io_x_hw_0; // @[package.scala:267:30]
wire io_y_hx_0 = io_x_hx_0; // @[package.scala:267:30]
wire io_y_hr_0 = io_x_hr_0; // @[package.scala:267:30]
wire io_y_pw_0 = io_x_pw_0; // @[package.scala:267:30]
wire io_y_px_0 = io_x_px_0; // @[package.scala:267:30]
wire io_y_pr_0 = io_x_pr_0; // @[package.scala:267:30]
wire io_y_ppp_0 = io_x_ppp_0; // @[package.scala:267:30]
wire io_y_pal_0 = io_x_pal_0; // @[package.scala:267:30]
wire io_y_paa_0 = io_x_paa_0; // @[package.scala:267:30]
wire io_y_eff_0 = io_x_eff_0; // @[package.scala:267:30]
wire io_y_c_0 = io_x_c_0; // @[package.scala:267:30]
wire io_y_fragmented_superpage = io_x_fragmented_superpage_0; // @[package.scala:267:30]
assign io_y_ppn = io_y_ppn_0; // @[package.scala:267:30]
assign io_y_u = io_y_u_0; // @[package.scala:267:30]
assign io_y_ae_ptw = io_y_ae_ptw_0; // @[package.scala:267:30]
assign io_y_ae_final = io_y_ae_final_0; // @[package.scala:267:30]
assign io_y_ae_stage2 = io_y_ae_stage2_0; // @[package.scala:267:30]
assign io_y_pf = io_y_pf_0; // @[package.scala:267:30]
assign io_y_gf = io_y_gf_0; // @[package.scala:267:30]
assign io_y_sw = io_y_sw_0; // @[package.scala:267:30]
assign io_y_sx = io_y_sx_0; // @[package.scala:267:30]
assign io_y_sr = io_y_sr_0; // @[package.scala:267:30]
assign io_y_hw = io_y_hw_0; // @[package.scala:267:30]
assign io_y_hx = io_y_hx_0; // @[package.scala:267:30]
assign io_y_hr = io_y_hr_0; // @[package.scala:267:30]
assign io_y_pw = io_y_pw_0; // @[package.scala:267:30]
assign io_y_px = io_y_px_0; // @[package.scala:267:30]
assign io_y_pr = io_y_pr_0; // @[package.scala:267:30]
assign io_y_ppp = io_y_ppp_0; // @[package.scala:267:30]
assign io_y_pal = io_y_pal_0; // @[package.scala:267:30]
assign io_y_paa = io_y_paa_0; // @[package.scala:267:30]
assign io_y_eff = io_y_eff_0; // @[package.scala:267:30]
assign io_y_c = io_y_c_0; // @[package.scala:267:30]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File FIFOFixer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.diplomacy.RegionType
import freechips.rocketchip.util.property
class TLFIFOFixer(policy: TLFIFOFixer.Policy = TLFIFOFixer.all)(implicit p: Parameters) extends LazyModule
{
private def fifoMap(seq: Seq[TLSlaveParameters]) = {
val (flatManagers, keepManagers) = seq.partition(policy)
// We need to be careful if one flatManager and one keepManager share an existing domain
// Erring on the side of caution, we will also flatten the keepManager in this case
val flatDomains = Set(flatManagers.flatMap(_.fifoId):_*) // => ID 0
val keepDomains = Set(keepManagers.flatMap(_.fifoId):_*) -- flatDomains // => IDs compacted
// Calculate what the FIFO domains look like after the fixer is applied
val flatMap = flatDomains.map { x => (x, 0) }.toMap
val keepMap = keepDomains.scanLeft((-1,0)) { case ((_,s),x) => (x, s+1) }.toMap
val map = flatMap ++ keepMap
val fixMap = seq.map { m => m.fifoId match {
case None => if (policy(m)) Some(0) else None
case Some(id) => Some(map(id)) // also flattens some who did not ask
} }
// Compress the FIFO domain space of those we are combining
val reMap = flatDomains.scanLeft((-1,-1)) { case ((_,s),x) => (x, s+1) }.toMap
val splatMap = seq.map { m => m.fifoId match {
case None => None
case Some(id) => reMap.lift(id)
} }
(fixMap, splatMap)
}
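  // Worked example (illustrative only): suppose the policy flattens managers living in
  // existing FIFO domains {3, 5} and keeps domains {1, 7} as-is.
  //   flatMap sends 3 -> 0 and 5 -> 0, merging them into the single serialized domain 0;
  //   keepMap renumbers the survivors compactly, e.g. 1 -> 1 and 7 -> 2.
  // fixMap then reports each manager's post-fixer domain, while splatMap re-labels the
  // flattened managers with compact indices (via reMap) so the module below can still
  // tell which of the merged domains an in-flight request targets.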
val node = new AdapterNode(TLImp)(
{ cp => cp },
{ mp =>
val (fixMap, _) = fifoMap(mp.managers)
mp.v1copy(managers = (fixMap zip mp.managers) map { case (id, m) => m.v1copy(fifoId = id) })
}) with TLFormatNode {
override def circuitIdentity = edges.in.map(_.client.clients.filter(c => c.requestFifo && c.sourceId.size > 1).size).sum == 0
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
val (fixMap, splatMap) = fifoMap(edgeOut.manager.managers)
// Do we need to serialize the request to this manager?
val a_notFIFO = edgeIn.manager.fastProperty(in.a.bits.address, _.fifoId != Some(0), (b:Boolean) => b.B)
// Compact the IDs of the cases we serialize
val compacted = ((fixMap zip splatMap) zip edgeOut.manager.managers) flatMap {
case ((f, s), m) => if (f == Some(0)) Some(m.v1copy(fifoId = s)) else None
}
val sinks = if (compacted.exists(_.supportsAcquireB)) edgeOut.manager.endSinkId else 0
val a_id = if (compacted.isEmpty) 0.U else
edgeOut.manager.v1copy(managers = compacted, endSinkId = sinks).findFifoIdFast(in.a.bits.address)
val a_noDomain = a_id === 0.U
if (false) {
println(s"FIFOFixer for: ${edgeIn.client.clients.map(_.name).mkString(", ")}")
println(s"make FIFO: ${edgeIn.manager.managers.filter(_.fifoId==Some(0)).map(_.name).mkString(", ")}")
println(s"not FIFO: ${edgeIn.manager.managers.filter(_.fifoId!=Some(0)).map(_.name).mkString(", ")}")
println(s"domains: ${compacted.groupBy(_.name).mapValues(_.map(_.fifoId))}")
println("")
}
// Count beats
val a_first = edgeIn.first(in.a)
val d_first = edgeOut.first(out.d) && out.d.bits.opcode =/= TLMessages.ReleaseAck
// Keep one bit for each source recording if there is an outstanding request that must be made FIFO
// Sources unused in the stall signal calculation should be pruned by DCE
val flight = RegInit(VecInit(Seq.fill(edgeIn.client.endSourceId) { false.B }))
when (a_first && in.a.fire) { flight(in.a.bits.source) := !a_notFIFO }
when (d_first && in.d.fire) { flight(in.d.bits.source) := false.B }
val stalls = edgeIn.client.clients.filter(c => c.requestFifo && c.sourceId.size > 1).map { c =>
val a_sel = c.sourceId.contains(in.a.bits.source)
val id = RegEnable(a_id, in.a.fire && a_sel && !a_notFIFO)
val track = flight.slice(c.sourceId.start, c.sourceId.end)
a_sel && a_first && track.reduce(_ || _) && (a_noDomain || id =/= a_id)
}
val stall = stalls.foldLeft(false.B)(_||_)
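      // Illustration: if a FIFO client has an outstanding serialized request to domain 2
      // (track is set, id === 2) and now presents one for domain 1 (a_id === 1), the new
      // request stalls until the domain-2 responses drain; requests to the same domain,
      // and all non-serialized (a_notFIFO) requests, pass through unstalled.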
out.a <> in.a
in.d <> out.d
out.a.valid := in.a.valid && (a_notFIFO || !stall)
in.a.ready := out.a.ready && (a_notFIFO || !stall)
if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
in .b <> out.b
out.c <> in .c
out.e <> in .e
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
      // Functional cover properties
      property.cover(in.a.valid && stall, "COVER FIFOFIXER STALL", "Cover: Stall occurred for a valid transaction")
val SourceIdFIFOed = RegInit(0.U(edgeIn.client.endSourceId.W))
val SourceIdSet = WireDefault(0.U(edgeIn.client.endSourceId.W))
val SourceIdClear = WireDefault(0.U(edgeIn.client.endSourceId.W))
when (a_first && in.a.fire && !a_notFIFO) {
SourceIdSet := UIntToOH(in.a.bits.source)
}
when (d_first && in.d.fire) {
SourceIdClear := UIntToOH(in.d.bits.source)
}
SourceIdFIFOed := SourceIdFIFOed | SourceIdSet
val allIDs_FIFOed = SourceIdFIFOed===Fill(SourceIdFIFOed.getWidth, 1.U)
property.cover(allIDs_FIFOed, "COVER all sources", "Cover: FIFOFIXER covers all Source IDs")
//property.cover(flight.reduce(_ && _), "COVER full", "Cover: FIFO is full with all Source IDs")
property.cover(!(flight.reduce(_ || _)), "COVER empty", "Cover: FIFO is empty")
property.cover(SourceIdSet > 0.U, "COVER at least one push", "Cover: At least one Source ID is pushed")
property.cover(SourceIdClear > 0.U, "COVER at least one pop", "Cover: At least one Source ID is popped")
}
}
}
object TLFIFOFixer
{
// Which slaves should have their FIFOness combined?
// NOTE: this transformation is still only applied for masters with requestFifo
type Policy = TLSlaveParameters => Boolean
import RegionType._
val all: Policy = m => true
val allFIFO: Policy = m => m.fifoId.isDefined
val allVolatile: Policy = m => m.regionType <= VOLATILE
def apply(policy: Policy = all)(implicit p: Parameters): TLNode =
{
val fixer = LazyModule(new TLFIFOFixer(policy))
fixer.node
}
}
File Buffer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.BufferParams
class TLBufferNode (
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit valName: ValName) extends TLAdapterNode(
clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) },
managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) }
) {
override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}"
override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none)
}
class TLBuffer(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters) extends LazyModule
{
def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace)
def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde)
def this()(implicit p: Parameters) = this(BufferParams.default)
val node = new TLBufferNode(a, b, c, d, e)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def headBundle = node.out.head._2.bundle
override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.a <> a(in .a)
in .d <> d(out.d)
if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
in .b <> b(out.b)
out.c <> c(in .c)
out.e <> e(in .e)
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLBuffer
{
def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default)
def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde)
def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace)
def apply(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters): TLNode =
{
val buffer = LazyModule(new TLBuffer(a, b, c, d, e))
buffer.node
}
def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = {
val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) }
name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } }
buffers.map(_.node)
}
def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = {
chain(depth, name)
.reduceLeftOption(_ :*=* _)
.getOrElse(TLNameNode("no_buffer"))
}
}
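// Hypothetical usage sketches (node names invented): a buffer is normally spliced into a
// diplomatic connection, either as a single stage or chained for extra timing slack.
//   sink.node := TLBuffer() := source.node
//   sink.node := TLBuffer.chainNode(2, Some("sbus_out")) := source.node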
File Fragmenter.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.amba.axi4
import chisel3._
import chisel3.util.{Mux1H, Queue, IrrevocableIO, log2Ceil, UIntToOH}
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp}
import freechips.rocketchip.diplomacy.{AddressDecoder, AddressSet, TransferSizes}
import freechips.rocketchip.util.{ControlKey, SimpleBundleField, rightOR, leftOR, OH1ToOH, UIntToOH1}
case object AXI4FragLast extends ControlKey[Bool]("real_last")
case class AXI4FragLastField() extends SimpleBundleField(AXI4FragLast)(Output(Bool()), false.B)
/**
 * AXI4 fragmenter. It breaks AXI4 burst transfers into shorter aligned bursts that each
 * target slave supports, down to single beats when necessary.
*/
class AXI4Fragmenter()(implicit p: Parameters) extends LazyModule
{
val maxBeats = 1 << AXI4Parameters.lenBits
def expandTransfer(x: TransferSizes, beatBytes: Int, alignment: BigInt) =
if (!x) x else TransferSizes(x.min, alignment.min(maxBeats*beatBytes).intValue)
def mapSlave(s: AXI4SlaveParameters, beatBytes: Int) = s.copy(
supportsWrite = expandTransfer(s.supportsWrite, beatBytes, s.minAlignment),
supportsRead = expandTransfer(s.supportsRead, beatBytes, s.minAlignment),
interleavedId = None) // this breaks interleaving guarantees
def mapMaster(m: AXI4MasterParameters) = m.copy(aligned = true, maxFlight = None)
val node = AXI4AdapterNode(
masterFn = { mp => mp.copy(masters = mp.masters.map(m => mapMaster(m)), echoFields = AXI4FragLastField() +: mp.echoFields) },
slaveFn = { sp => sp.copy(slaves = sp.slaves .map(s => mapSlave(s, sp.beatBytes))) })
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
val slave = edgeOut.slave
val slaves = slave.slaves
val beatBytes = slave.beatBytes
val lgBytes = log2Ceil(beatBytes)
val master = edgeIn.master
val masters = master.masters
// We don't support fragmenting to sub-beat accesses
slaves.foreach { s =>
require (!s.supportsRead || s.supportsRead.contains(beatBytes))
require (!s.supportsWrite || s.supportsWrite.contains(beatBytes))
}
/* We need to decompose a request into
* FIXED => each beat is a new request
* WRAP/INCR => take xfr up to next power of two, capped by max size of target
*
* On AR and AW, we fragment one request into many
* On W we set 'last' on beats which are fragment boundaries
* On R we clear 'last' on the fragments being reassembled
* On B we clear 'valid' on the responses for the injected fragments
*
* AR=>R and AW+W=>B are completely independent state machines.
*/
/* Returns the number of beats to execute and the new address */
def fragment(a: IrrevocableIO[AXI4BundleA], supportedSizes1: Seq[Int]): (IrrevocableIO[AXI4BundleA], Bool, UInt) = {
val out = Wire(chiselTypeOf(a))
val busy = RegInit(false.B)
val r_addr = Reg(UInt(a.bits.params.addrBits.W))
val r_len = Reg(UInt(AXI4Parameters.lenBits.W))
val len = Mux(busy, r_len, a.bits.len)
val addr = Mux(busy, r_addr, a.bits.addr)
val lo = if (lgBytes == 0) 0.U else addr(lgBytes-1, 0)
val cutoff = AXI4Parameters.lenBits + lgBytes
val alignment = addr((a.bits.params.addrBits min cutoff)-1, lgBytes)
// We don't care about illegal addresses; bursts or no bursts... whatever circuit is simpler (AXI4ToTL will fix it)
// !!! think about this more -- what if illegal?
val sizes1 = (supportedSizes1 zip slave.slaves.map(_.address)).filter(_._1 >= 0).groupBy(_._1).mapValues(_.flatMap(_._2))
val reductionMask = AddressDecoder(sizes1.values.toList)
val support1 = Mux1H(sizes1.toList.map { case (v, a) => // maximum supported size-1 based on target address
(AddressSet.unify(a.map(_.widen(~reductionMask)).distinct).map(_.contains(addr)).reduce(_||_), v.U)
})
/* We need to compute the largest transfer allowed by the AXI len.
* len+1 is the number of beats to execute.
* We want the MSB(len+1)-1; one less than the largest power of two we could execute.
* There are two cases; either len is 2^n-1 in which case we leave it unchanged, ELSE
* fill the bits from highest to lowest, and shift right by one bit.
*/
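      // Worked example (illustrative): len = 5, i.e. six beats requested (low bits 0b101).
      //   fillLow  = rightOR(0b101) >> 1  = 0b111 >> 1 = 0b011
      //   wipeHigh = ~leftOR(~0b101)      = ~leftOR(...11111010) = 0b001
      //   remain1  = 0b011 | 0b001        = 0b011 = MSB(5+1) - 1
      // so at most four beats go out per fragment, before the alignment (align1) and
      // slave-size (support1) limits below are applied.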
val fillLow = rightOR(len) >> 1 // set all bits in positions < a set bit
val wipeHigh = ~(leftOR(~len)) // clear all bits in position >= a cleared bit
val remain1 = fillLow | wipeHigh // MSB(a.len+1)-1
val align1 = ~leftOR(alignment) // transfer size limited by address alignment
val maxSupported1 = remain1 & align1 & support1 // Take the minimum of all the limits
// Things that cause us to degenerate to a single beat
val fixed = a.bits.burst === AXI4Parameters.BURST_FIXED
val narrow = a.bits.size =/= lgBytes.U
val bad = fixed || narrow
// The number of beats-1 to execute
val beats1 = Mux(bad, 0.U, maxSupported1)
val beats = OH1ToOH(beats1) // beats1 + 1
val inc_addr = addr + (beats << a.bits.size) // address after adding transfer
val wrapMask = a.bits.bytes1() // only these bits may change, if wrapping
val mux_addr = WireDefault(inc_addr)
when (a.bits.burst === AXI4Parameters.BURST_WRAP) {
mux_addr := (inc_addr & wrapMask) | ~(~a.bits.addr | wrapMask)
}
when (a.bits.burst === AXI4Parameters.BURST_FIXED) {
mux_addr := a.bits.addr
}
val last = beats1 === len
a.ready := out.ready && last
out.valid := a.valid
out.bits :<= a.bits
out.bits.len := beats1
// We forcibly align every access. If the first beat was misaligned, the strb bits
// for the lower addresses must not have been set. Therefore, rounding the address
// down is harmless. We can do this after the address update algorithm, because the
// incremented values will be rounded down the same way. Furthermore, a subword
// offset cannot cause a premature wrap-around.
out.bits.addr := ~(~addr | UIntToOH1(a.bits.size, lgBytes))
when (out.fire) {
busy := !last
r_addr := mux_addr
r_len := len - beats
}
(out, last, beats)
}
// The size to which we will fragment the access
val readSizes1 = slaves.map(s => s.supportsRead .max/beatBytes-1)
val writeSizes1 = slaves.map(s => s.supportsWrite.max/beatBytes-1)
// Irrevocable queues in front because we want to accept the request before responses come back
val (in_ar, ar_last, _) = fragment(Queue.irrevocable(in.ar, 1, flow=true), readSizes1)
val (in_aw, aw_last, w_beats) = fragment(Queue.irrevocable(in.aw, 1, flow=true), writeSizes1)
// AXI ready may not depend on valid of other channels
// We cut wready here along with awready and arready before AXI4ToTL
val in_w = Queue.irrevocable(in.w, 1, flow=true)
// AR flow control; super easy
Connectable.waiveUnmatched(out.ar, in_ar) match {
case (lhs, rhs) => lhs :<>= rhs
}
out.ar.bits.echo(AXI4FragLast) := ar_last
// When does W channel start counting a new transfer
val wbeats_latched = RegInit(false.B)
val wbeats_ready = Wire(Bool())
val wbeats_valid = Wire(Bool())
when (wbeats_valid && wbeats_ready) { wbeats_latched := true.B }
when (out.aw.fire) { wbeats_latched := false.B }
// AW flow control
out.aw.valid := in_aw.valid && (wbeats_ready || wbeats_latched)
in_aw.ready := out.aw.ready && (wbeats_ready || wbeats_latched)
wbeats_valid := in_aw.valid && !wbeats_latched
Connectable.waiveUnmatched(out.aw.bits, in_aw.bits) match {
case (lhs, rhs) => lhs :<>= rhs
}
out.aw.bits.echo(AXI4FragLast) := aw_last
// We need to inject 'last' into the W channel fragments, count!
val w_counter = RegInit(0.U((AXI4Parameters.lenBits+1).W))
val w_idle = w_counter === 0.U
val w_todo = Mux(w_idle, Mux(wbeats_valid, w_beats, 0.U), w_counter)
val w_last = w_todo === 1.U
w_counter := w_todo - out.w.fire
assert (!out.w.fire || w_todo =/= 0.U) // underflow impossible
// W flow control
wbeats_ready := w_idle
out.w.valid := in_w.valid && (!wbeats_ready || wbeats_valid)
in_w.ready := out.w.ready && (!wbeats_ready || wbeats_valid)
out.w.bits :<= in_w.bits
out.w.bits.last := w_last
      // The incoming burst's true last beat must also be the last beat of its final fragment
assert (!out.w.valid || !in_w.bits.last || w_last)
// R flow control
val r_last = out.r.bits.echo(AXI4FragLast)
Connectable.waiveUnmatched(in.r, out.r) match {
case (lhs, rhs) => lhs :<>= rhs
}
in.r.bits.last := out.r.bits.last && r_last
// B flow control
val b_last = out.b.bits.echo(AXI4FragLast)
Connectable.waiveUnmatched(in.b, out.b) match {
case (lhs, rhs) => lhs :<>= rhs
}
in.b.valid := out.b.valid && b_last
out.b.ready := in.b.ready || !b_last
// Merge errors from dropped B responses
val error = RegInit(VecInit.fill(edgeIn.master.endId)(0.U(AXI4Parameters.respBits.W)))
in.b.bits.resp := out.b.bits.resp | error(out.b.bits.id)
(error zip UIntToOH(out.b.bits.id, edgeIn.master.endId).asBools) foreach { case (reg, sel) =>
when (sel && out.b.fire) { reg := Mux(b_last, 0.U, reg | out.b.bits.resp) }
}
}
}
}
object AXI4Fragmenter
{
def apply()(implicit p: Parameters): AXI4Node =
{
val axi4frag = LazyModule(new AXI4Fragmenter)
axi4frag.node
}
}
File IdIndexer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.amba.axi4
import chisel3._
import chisel3.util.{log2Ceil, Cat}
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp}
import freechips.rocketchip.diplomacy.IdRange
import freechips.rocketchip.util.{ControlKey, SimpleBundleField}
case object AXI4ExtraId extends ControlKey[UInt]("extra_id")
case class AXI4ExtraIdField(width: Int) extends SimpleBundleField(AXI4ExtraId)(Output(UInt(width.W)), 0.U)
/** This adapter limits the set of FIFO domain ids used by outbound transactions.
*
* Extra AWID and ARID bits from upstream transactions are stored in a User Bits field called AXI4ExtraId,
* which values are expected to be echoed back to this adapter alongside any downstream response messages,
* and are then prepended to the RID and BID field to restore the original identifier.
*
* @param idBits is the desired number of A[W|R]ID bits to be used
*/
class AXI4IdIndexer(idBits: Int)(implicit p: Parameters) extends LazyModule
{
  require (idBits >= 0, s"AXI4IdIndexer: idBits must be >= 0, not $idBits")
val node = AXI4AdapterNode(
masterFn = { mp =>
// Create one new "master" per ID
val masters = Array.tabulate(1 << idBits) { i => AXI4MasterParameters(
name = "",
id = IdRange(i, i+1),
aligned = true,
maxFlight = Some(0))
}
// Accumulate the names of masters we squish
val names = Array.fill(1 << idBits) { new scala.collection.mutable.HashSet[String]() }
// Squash the information from original masters into new ID masters
mp.masters.foreach { m =>
for (i <- m.id.start until m.id.end) {
val j = i % (1 << idBits)
val accumulated = masters(j)
names(j) += m.name
masters(j) = accumulated.copy(
aligned = accumulated.aligned && m.aligned,
maxFlight = accumulated.maxFlight.flatMap { o => m.maxFlight.map { n => o+n } })
}
}
val finalNameStrings = names.map { n => if (n.isEmpty) "(unused)" else n.toList.mkString(", ") }
val bits = log2Ceil(mp.endId) - idBits
val field = if (bits > 0) Seq(AXI4ExtraIdField(bits)) else Nil
mp.copy(
echoFields = field ++ mp.echoFields,
masters = masters.zip(finalNameStrings).map { case (m, n) => m.copy(name = n) })
},
slaveFn = { sp => sp
})
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
// Leave everything mostly untouched
Connectable.waiveUnmatched(out.ar, in.ar) match {
case (lhs, rhs) => lhs.squeezeAll :<>= rhs.squeezeAll
}
Connectable.waiveUnmatched(out.aw, in.aw) match {
case (lhs, rhs) => lhs.squeezeAll :<>= rhs.squeezeAll
}
Connectable.waiveUnmatched(out.w, in.w) match {
case (lhs, rhs) => lhs.squeezeAll :<>= rhs.squeezeAll
}
Connectable.waiveUnmatched(in.b, out.b) match {
case (lhs, rhs) => lhs.squeezeAll :<>= rhs.squeezeAll
}
Connectable.waiveUnmatched(in.r, out.r) match {
case (lhs, rhs) => lhs.squeezeAll :<>= rhs.squeezeAll
}
val bits = log2Ceil(edgeIn.master.endId) - idBits
if (bits > 0) {
// (in.aX.bits.id >> idBits).width = bits > 0
out.ar.bits.echo(AXI4ExtraId) := in.ar.bits.id >> idBits
out.aw.bits.echo(AXI4ExtraId) := in.aw.bits.id >> idBits
// Special care is needed in case of 0 idBits, b/c .id has width 1 still
if (idBits == 0) {
out.ar.bits.id := 0.U
out.aw.bits.id := 0.U
in.r.bits.id := out.r.bits.echo(AXI4ExtraId)
in.b.bits.id := out.b.bits.echo(AXI4ExtraId)
} else {
in.r.bits.id := Cat(out.r.bits.echo(AXI4ExtraId), out.r.bits.id)
in.b.bits.id := Cat(out.b.bits.echo(AXI4ExtraId), out.b.bits.id)
}
}
}
}
}
object AXI4IdIndexer
{
def apply(idBits: Int)(implicit p: Parameters): AXI4Node =
{
val axi4index = LazyModule(new AXI4IdIndexer(idBits))
axi4index.node
}
}
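// Worked example (illustrative only): with an upstream endId of 64 and idBits = 4,
// log2Ceil(64) - 4 = 2 extra bits ride in the AXI4ExtraId echo field. An incoming
// AWID of 0b10_1101 is issued downstream with id 0b1101 and echo 0b10, and the
// upstream BID is reconstructed as Cat(0b10, 0b1101) = 0b10_1101.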
File ToTL.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.amba.axi4
import chisel3._
import chisel3.util.{Cat, log2Up, log2Ceil, UIntToOH, Queue}
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.nodes.{MixedAdapterNode, InwardNodeImp}
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp}
import freechips.rocketchip.amba.{AMBACorrupt, AMBAProt, AMBAProtField}
import freechips.rocketchip.diplomacy.{IdRange, IdMapEntry, TransferSizes}
import freechips.rocketchip.tilelink.{TLImp, TLMasterParameters, TLMasterPortParameters, TLArbiter}
import freechips.rocketchip.util.{OH1ToUInt, UIntToOH1}
case class AXI4ToTLIdMapEntry(tlId: IdRange, axi4Id: IdRange, name: String)
extends IdMapEntry
{
val from = axi4Id
val to = tlId
val isCache = false
val requestFifo = false
val maxTransactionsInFlight = Some(tlId.size)
}
case class AXI4ToTLNode(wcorrupt: Boolean)(implicit valName: ValName) extends MixedAdapterNode(AXI4Imp, TLImp)(
dFn = { case mp =>
mp.masters.foreach { m => require (m.maxFlight.isDefined, "AXI4 must include a transaction maximum per ID to convert to TL") }
val maxFlight = mp.masters.map(_.maxFlight.get).max
TLMasterPortParameters.v1(
clients = mp.masters.filter(_.maxFlight != Some(0)).flatMap { m =>
for (id <- m.id.start until m.id.end)
yield TLMasterParameters.v1(
name = s"${m.name} ID#${id}",
sourceId = IdRange(id * maxFlight*2, (id+1) * maxFlight*2), // R+W ids are distinct
nodePath = m.nodePath,
requestFifo = true)
},
echoFields = mp.echoFields,
requestFields = AMBAProtField() +: mp.requestFields,
responseKeys = mp.responseKeys)
},
uFn = { mp => AXI4SlavePortParameters(
slaves = mp.managers.map { m =>
val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits))
AXI4SlaveParameters(
address = m.address,
resources = m.resources,
regionType = m.regionType,
executable = m.executable,
nodePath = m.nodePath,
supportsWrite = m.supportsPutPartial.intersect(maxXfer),
supportsRead = m.supportsGet.intersect(maxXfer),
interleavedId = Some(0))}, // TL2 never interleaves D beats
beatBytes = mp.beatBytes,
minLatency = mp.minLatency,
responseFields = mp.responseFields,
requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt))
})
/**
* Convert AXI4 master to TileLink.
*
* You can use this adapter to connect external AXI4 masters to TileLink bus topology.
*
* Setting wcorrupt=true is insufficient to enable w.user.corrupt.
* One must additionally list it in the AXI4 master's requestFields.
*
* @param wcorrupt enable AMBACorrupt in w.user
*/
class AXI4ToTL(wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule
{
val node = AXI4ToTLNode(wcorrupt)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
val numIds = edgeIn.master.endId
val beatBytes = edgeOut.manager.beatBytes
val beatCountBits = AXI4Parameters.lenBits + (1 << AXI4Parameters.sizeBits) - 1
val maxFlight = edgeIn.master.masters.map(_.maxFlight.get).max
val logFlight = log2Ceil(maxFlight)
val txnCountBits = log2Ceil(maxFlight+1) // wrap-around must not block b_allow
val addedBits = logFlight + 1 // +1 for read vs. write source ID
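      // Illustrative source-id layout (example values only): with maxFlight = 4,
      // logFlight = 2 and addedBits = 3, a read on AXI ID 0b101 with in-flight count
      // 0b10 becomes TL source Cat(0b101, 0b10, 0b0) = 0b101100, which sits inside
      // that ID's reserved range [id*maxFlight*2, (id+1)*maxFlight*2) = [40, 48);
      // the corresponding write would set the low bit instead.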
require (edgeIn.master.masters(0).aligned)
edgeOut.manager.requireFifo()
// Look for an Error device to redirect bad requests
val errorDevs = edgeOut.manager.managers.filter(_.nodePath.last.lazyModule.className == "TLError")
require (!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. One must be instantiated.")
val errorDev = errorDevs.maxBy(_.maxTransfer)
val error = errorDev.address.head.base
require (errorDev.supportsPutPartial.contains(edgeOut.manager.maxTransfer),
s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support ${edgeOut.manager.maxTransfer}")
require (errorDev.supportsGet.contains(edgeOut.manager.maxTransfer),
s"Error device supports ${errorDev.supportsGet} Get but must support ${edgeOut.manager.maxTransfer}")
val r_out = WireDefault(out.a)
val r_size1 = in.ar.bits.bytes1()
val r_size = OH1ToUInt(r_size1)
val r_ok = edgeOut.manager.supportsGetSafe(in.ar.bits.addr, r_size)
val r_addr = Mux(r_ok, in.ar.bits.addr, error.U | in.ar.bits.addr(log2Up(beatBytes)-1, 0))
val r_count = RegInit(VecInit.fill(numIds) { 0.U(txnCountBits.W) })
val r_id = if (maxFlight == 1) {
Cat(in.ar.bits.id, 0.U(1.W))
} else {
Cat(in.ar.bits.id, r_count(in.ar.bits.id)(logFlight-1,0), 0.U(1.W))
}
assert (!in.ar.valid || r_size1 === UIntToOH1(r_size, beatCountBits)) // because aligned
in.ar.ready := r_out.ready
r_out.valid := in.ar.valid
r_out.bits :<= edgeOut.Get(r_id, r_addr, r_size)._2
Connectable.waiveUnmatched(r_out.bits.user, in.ar.bits.user) match {
case (lhs, rhs) => lhs.squeezeAll :<= rhs.squeezeAll
}
r_out.bits.user.lift(AMBAProt).foreach { rprot =>
rprot.privileged := in.ar.bits.prot(0)
rprot.secure := !in.ar.bits.prot(1)
rprot.fetch := in.ar.bits.prot(2)
rprot.bufferable := in.ar.bits.cache(0)
rprot.modifiable := in.ar.bits.cache(1)
rprot.readalloc := in.ar.bits.cache(2)
rprot.writealloc := in.ar.bits.cache(3)
}
val r_sel = UIntToOH(in.ar.bits.id, numIds)
(r_sel.asBools zip r_count) foreach { case (s, r) =>
when (in.ar.fire && s) { r := r + 1.U }
}
val w_out = WireDefault(out.a)
val w_size1 = in.aw.bits.bytes1()
val w_size = OH1ToUInt(w_size1)
val w_ok = edgeOut.manager.supportsPutPartialSafe(in.aw.bits.addr, w_size)
val w_addr = Mux(w_ok, in.aw.bits.addr, error.U | in.aw.bits.addr(log2Up(beatBytes)-1, 0))
val w_count = RegInit(VecInit.fill(numIds) { 0.U(txnCountBits.W) })
val w_id = if (maxFlight == 1) {
Cat(in.aw.bits.id, 1.U(1.W))
} else {
Cat(in.aw.bits.id, w_count(in.aw.bits.id)(logFlight-1,0), 1.U(1.W))
}
assert (!in.aw.valid || w_size1 === UIntToOH1(w_size, beatCountBits)) // because aligned
assert (!in.aw.valid || in.aw.bits.len === 0.U || in.aw.bits.size === log2Ceil(beatBytes).U) // because aligned
in.aw.ready := w_out.ready && in.w.valid && in.w.bits.last
in.w.ready := w_out.ready && in.aw.valid
w_out.valid := in.aw.valid && in.w.valid
w_out.bits :<= edgeOut.Put(w_id, w_addr, w_size, in.w.bits.data, in.w.bits.strb)._2
in.w.bits.user.lift(AMBACorrupt).foreach { w_out.bits.corrupt := _ }
Connectable.waiveUnmatched(w_out.bits.user, in.aw.bits.user) match {
case (lhs, rhs) => lhs.squeezeAll :<= rhs.squeezeAll
}
w_out.bits.user.lift(AMBAProt).foreach { wprot =>
wprot.privileged := in.aw.bits.prot(0)
wprot.secure := !in.aw.bits.prot(1)
wprot.fetch := in.aw.bits.prot(2)
wprot.bufferable := in.aw.bits.cache(0)
wprot.modifiable := in.aw.bits.cache(1)
wprot.readalloc := in.aw.bits.cache(2)
wprot.writealloc := in.aw.bits.cache(3)
}
val w_sel = UIntToOH(in.aw.bits.id, numIds)
(w_sel.asBools zip w_count) foreach { case (s, r) =>
when (in.aw.fire && s) { r := r + 1.U }
}
TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, r_out), (in.aw.bits.len, w_out))
val ok_b = WireDefault(in.b)
val ok_r = WireDefault(in.r)
val d_resp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY)
val d_hasData = edgeOut.hasData(out.d.bits)
val d_last = edgeOut.last(out.d)
out.d.ready := Mux(d_hasData, ok_r.ready, ok_b.ready)
ok_r.valid := out.d.valid && d_hasData
ok_b.valid := out.d.valid && !d_hasData
ok_r.bits.id := out.d.bits.source >> addedBits
ok_r.bits.data := out.d.bits.data
ok_r.bits.resp := d_resp
ok_r.bits.last := d_last
ok_r.bits.user :<= out.d.bits.user
// AXI4 needs irrevocable behaviour
in.r :<>= Queue.irrevocable(ok_r, 1, flow=true)
ok_b.bits.id := out.d.bits.source >> addedBits
ok_b.bits.resp := d_resp
ok_b.bits.user :<= out.d.bits.user
// AXI4 needs irrevocable behaviour
val q_b = Queue.irrevocable(ok_b, 1, flow=true)
// We need to prevent sending B valid before the last W beat is accepted
// TileLink allows early acknowledgement of a write burst, but AXI does not.
val b_count = RegInit(VecInit.fill(numIds) { 0.U(txnCountBits.W) })
val b_allow = b_count(in.b.bits.id) =/= w_count(in.b.bits.id)
val b_sel = UIntToOH(in.b.bits.id, numIds)
(b_sel.asBools zip b_count) foreach { case (s, r) =>
when (in.b.fire && s) { r := r + 1.U }
}
in.b.bits :<= q_b.bits
in.b.valid := q_b.valid && b_allow
q_b.ready := in.b.ready && b_allow
// Unused channels
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
class AXI4BundleRError(params: AXI4BundleParameters) extends AXI4BundleBase(params)
{
val id = UInt(params.idBits.W)
val last = Bool()
}
object AXI4ToTL
{
def apply(wcorrupt: Boolean = true)(implicit p: Parameters) =
{
val axi42tl = LazyModule(new AXI4ToTL(wcorrupt))
axi42tl.node
}
}
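// Hypothetical attach sequence (node names and widths invented): an external AXI4
// master is typically squeezed into a bounded ID space, fragmented into transfers the
// TileLink side can express, and only then converted:
//   (tl_xbar.node
//     := AXI4ToTL()
//     := AXI4UserYanker(Some(8))
//     := AXI4Fragmenter()
//     := AXI4IdIndexer(idBits = 4)
//     := axi_master.node)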
File WidthWidget.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.AddressSet
import freechips.rocketchip.util.{Repeater, UIntToOH1}
// innerBeatBytes => the new client-facing bus width
class TLWidthWidget(innerBeatBytes: Int)(implicit p: Parameters) extends LazyModule
{
private def noChangeRequired(manager: TLManagerPortParameters) = manager.beatBytes == innerBeatBytes
val node = new TLAdapterNode(
clientFn = { case c => c },
managerFn = { case m => m.v1copy(beatBytes = innerBeatBytes) }){
override def circuitIdentity = edges.out.map(_.manager).forall(noChangeRequired)
}
override lazy val desiredName = s"TLWidthWidget$innerBeatBytes"
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def merge[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T]) = {
val inBytes = edgeIn.manager.beatBytes
val outBytes = edgeOut.manager.beatBytes
val ratio = outBytes / inBytes
val keepBits = log2Ceil(outBytes)
val dropBits = log2Ceil(inBytes)
val countBits = log2Ceil(ratio)
val size = edgeIn.size(in.bits)
val hasData = edgeIn.hasData(in.bits)
val limit = UIntToOH1(size, keepBits) >> dropBits
val count = RegInit(0.U(countBits.W))
val first = count === 0.U
val last = count === limit || !hasData
val enable = Seq.tabulate(ratio) { i => !((count ^ i.U) & limit).orR }
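      // Worked example (illustrative): merging a 4-byte master into a 16-byte slave gives
      // ratio = 4, keepBits = 4, dropBits = 2. For an 8-byte access (size = 3):
      //   limit = UIntToOH1(3, 4) >> 2 = 0b0111 >> 2 = 0b01, so two input beats are merged
      //   into a single output beat; beat 0 (count = 0) is captured into lanes {0, 2} and
      //   beat 1 into lanes {1, 3}, and the output mask computed below then selects
      //   whichever aligned half of the wide beat the address actually targets.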
val corrupt_reg = RegInit(false.B)
val corrupt_in = edgeIn.corrupt(in.bits)
val corrupt_out = corrupt_in || corrupt_reg
when (in.fire) {
count := count + 1.U
corrupt_reg := corrupt_out
when (last) {
count := 0.U
corrupt_reg := false.B
}
}
def helper(idata: UInt): UInt = {
// rdata is X until the first time a multi-beat write occurs.
// Prevent the X from leaking outside by jamming the mux control until
// the first time rdata is written (and hence no longer X).
val rdata_written_once = RegInit(false.B)
val masked_enable = enable.map(_ || !rdata_written_once)
val odata = Seq.fill(ratio) { WireInit(idata) }
val rdata = Reg(Vec(ratio-1, chiselTypeOf(idata)))
val pdata = rdata :+ idata
val mdata = (masked_enable zip (odata zip pdata)) map { case (e, (o, p)) => Mux(e, o, p) }
when (in.fire && !last) {
rdata_written_once := true.B
(rdata zip mdata) foreach { case (r, m) => r := m }
}
Cat(mdata.reverse)
}
in.ready := out.ready || !last
out.valid := in.valid && last
out.bits := in.bits
// Don't put down hardware if we never carry data
edgeOut.data(out.bits) := (if (edgeIn.staticHasData(in.bits) == Some(false)) 0.U else helper(edgeIn.data(in.bits)))
edgeOut.corrupt(out.bits) := corrupt_out
(out.bits, in.bits) match {
case (o: TLBundleA, i: TLBundleA) => o.mask := edgeOut.mask(o.address, o.size) & Mux(hasData, helper(i.mask), ~0.U(outBytes.W))
case (o: TLBundleB, i: TLBundleB) => o.mask := edgeOut.mask(o.address, o.size) & Mux(hasData, helper(i.mask), ~0.U(outBytes.W))
case (o: TLBundleC, i: TLBundleC) => ()
case (o: TLBundleD, i: TLBundleD) => ()
case _ => require(false, "Impossible bundle combination in WidthWidget")
}
}
def split[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T], sourceMap: UInt => UInt) = {
val inBytes = edgeIn.manager.beatBytes
val outBytes = edgeOut.manager.beatBytes
val ratio = inBytes / outBytes
val keepBits = log2Ceil(inBytes)
val dropBits = log2Ceil(outBytes)
val countBits = log2Ceil(ratio)
val size = edgeIn.size(in.bits)
val hasData = edgeIn.hasData(in.bits)
val limit = UIntToOH1(size, keepBits) >> dropBits
val count = RegInit(0.U(countBits.W))
val first = count === 0.U
val last = count === limit || !hasData
when (out.fire) {
count := count + 1.U
when (last) { count := 0.U }
}
// For sub-beat transfer, extract which part matters
val sel = in.bits match {
case a: TLBundleA => a.address(keepBits-1, dropBits)
case b: TLBundleB => b.address(keepBits-1, dropBits)
case c: TLBundleC => c.address(keepBits-1, dropBits)
case d: TLBundleD => {
val sel = sourceMap(d.source)
val hold = Mux(first, sel, RegEnable(sel, first)) // a_first is not for whole xfer
hold & ~limit // if more than one a_first/xfer, the address must be aligned anyway
}
}
val index = sel | count
def helper(idata: UInt, width: Int): UInt = {
val mux = VecInit.tabulate(ratio) { i => idata((i+1)*outBytes*width-1, i*outBytes*width) }
mux(index)
}
out.bits := in.bits
out.valid := in.valid
in.ready := out.ready
// Don't put down hardware if we never carry data
edgeOut.data(out.bits) := (if (edgeIn.staticHasData(in.bits) == Some(false)) 0.U else helper(edgeIn.data(in.bits), 8))
(out.bits, in.bits) match {
case (o: TLBundleA, i: TLBundleA) => o.mask := helper(i.mask, 1)
case (o: TLBundleB, i: TLBundleB) => o.mask := helper(i.mask, 1)
case (o: TLBundleC, i: TLBundleC) => () // replicating corrupt to all beats is ok
case (o: TLBundleD, i: TLBundleD) => ()
        case _ => require(false, "Impossible bundle combination in WidthWidget")
}
// Repeat the input if we're not last
!last
}
def splice[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T], sourceMap: UInt => UInt) = {
if (edgeIn.manager.beatBytes == edgeOut.manager.beatBytes) {
// nothing to do; pass it through
out.bits := in.bits
out.valid := in.valid
in.ready := out.ready
} else if (edgeIn.manager.beatBytes > edgeOut.manager.beatBytes) {
// split input to output
val repeat = Wire(Bool())
val repeated = Repeater(in, repeat)
val cated = Wire(chiselTypeOf(repeated))
cated <> repeated
edgeIn.data(cated.bits) := Cat(
edgeIn.data(repeated.bits)(edgeIn.manager.beatBytes*8-1, edgeOut.manager.beatBytes*8),
edgeIn.data(in.bits)(edgeOut.manager.beatBytes*8-1, 0))
repeat := split(edgeIn, cated, edgeOut, out, sourceMap)
} else {
// merge input to output
merge(edgeIn, in, edgeOut, out)
}
}
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
// If the master is narrower than the slave, the D channel must be narrowed.
// This is tricky, because the D channel has no address data.
// Thus, you don't know which part of a sub-beat transfer to extract.
// To fix this, we record the relevant address bits for all sources.
// The assumption is that this sort of situation happens only where
// you connect a narrow master to the system bus, so there are few sources.
def sourceMap(source_bits: UInt) = {
val source = if (edgeIn.client.endSourceId == 1) 0.U(0.W) else source_bits
require (edgeOut.manager.beatBytes > edgeIn.manager.beatBytes)
val keepBits = log2Ceil(edgeOut.manager.beatBytes)
val dropBits = log2Ceil(edgeIn.manager.beatBytes)
val sources = Reg(Vec(edgeIn.client.endSourceId, UInt((keepBits-dropBits).W)))
val a_sel = in.a.bits.address(keepBits-1, dropBits)
when (in.a.fire) {
if (edgeIn.client.endSourceId == 1) { // avoid extraction-index-width warning
sources(0) := a_sel
} else {
sources(in.a.bits.source) := a_sel
}
}
// depopulate unused source registers:
edgeIn.client.unusedSources.foreach { id => sources(id) := 0.U }
val bypass = in.a.valid && in.a.bits.source === source
if (edgeIn.manager.minLatency > 0) sources(source)
else Mux(bypass, a_sel, sources(source))
}
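      // Example (illustrative): with a 4-byte master under a 16-byte slave, address bits
      // [3:2] are captured per source when the A request fires; on the matching D response
      // they select which quarter of the wide beat is returned to the narrow master.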
splice(edgeIn, in.a, edgeOut, out.a, sourceMap)
splice(edgeOut, out.d, edgeIn, in.d, sourceMap)
if (edgeOut.manager.anySupportAcquireB && edgeIn.client.anySupportProbe) {
splice(edgeOut, out.b, edgeIn, in.b, sourceMap)
splice(edgeIn, in.c, edgeOut, out.c, sourceMap)
out.e.valid := in.e.valid
out.e.bits := in.e.bits
in.e.ready := out.e.ready
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLWidthWidget
{
def apply(innerBeatBytes: Int)(implicit p: Parameters): TLNode =
{
val widget = LazyModule(new TLWidthWidget(innerBeatBytes))
widget.node
}
def apply(wrapper: TLBusWrapper)(implicit p: Parameters): TLNode = apply(wrapper.beatBytes)
}
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class TLRAMWidthWidget(first: Int, second: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val fuzz = LazyModule(new TLFuzzer(txns))
val model = LazyModule(new TLRAMModel("WidthWidget"))
val ram = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff)))
(ram.node
:= TLDelayer(0.1)
:= TLFragmenter(4, 256)
:= TLWidthWidget(second)
:= TLWidthWidget(first)
:= TLDelayer(0.1)
:= model.node
:= fuzz.node)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzz.module.io.finished
}
}
class TLRAMWidthWidgetTest(little: Int, big: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLRAMWidthWidget(little,big,txns)).module)
dut.io.start := DontCare
io.finished := dut.io.finished
}
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** Instantiate this [[LazyModule]], returning its [[AutoBundle]] and the unconnected [[Dangle]]s from this module and its
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
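// Hypothetical pattern (names invented for illustration): a raw wrapper that forwards an
// externally supplied clock/reset to its lazy children by driving childClock/childReset
// and opting into the implicit-clock plumbing.
//   class MyClockDomain(implicit p: Parameters) extends LazyModule {
//     lazy val module = new LazyRawModuleImp(this) {
//       override def provideImplicitClockToLazyChildren = true
//       val clockIn = IO(Input(Clock()))
//       val resetIn = IO(Input(Reset()))
//       childClock := clockIn
//       childReset := resetIn
//     }
//   }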
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
 * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all node are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
 *   Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
 *   Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
 *   the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
 *   Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
 *   It should extend [[chisel3.Data]], which represents the real hardware.
* @tparam DO
 *   Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
 *   describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
 *   Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
 *   Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
 *   interface. It should extend [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
 *   - line `─`: the source is processed by a function and the result is passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] whose inward and outward nodes are both this node.
val inward = this
val outward = this
/** Debug info of node bindings. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of connected ports. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameter propagation. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
 * Given the counts of known inward and outward bindings and of inward and outward star bindings, return the
 * resolved numbers of inward and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
 * A tuple of the resolved inward and outward star values, i.e. the number of edges each [[BIND_STAR]] binding expands to.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
 * An `n`-sized sequence of downward-flowing output edge parameters.
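 *
 * A sketch of typical implementations (illustrative only; `convert` and `fixedSourceParams` are hypothetical and
 * concrete subclasses differ): an adapter-like node maps each received inward parameter to one outward parameter,
 * while a source-like node ignores `p` entirely.
 * {{{
 * // adapter-style: def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] = p.map(convert)
 * // source-style:  def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] = Seq.fill(n)(fixedSourceParams)
 * }}}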
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uoParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
 * An `n`-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]]s are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
 * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)`; `flexSet` for `e` or `f` will be `Set(e, f)`.
*/
val flexSet = DFS(this, Map()).values
/** The total number of `:*=` operators attached to nodes in this flex set. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of `:=*` operators attached to nodes in this flex set. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Guard to ensure that the same node is not visited twice while resolving `:*=`-style operators. */
private var starCycleGuard = false
/** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
 * connections which need to be resolved to determine how many actual edges they correspond to. We also need to
 * build up the ranges of edges which correspond to each binding operator, so that we can apply the correct edge
 * parameters and later build up the correct bundle connections.
 *
 * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
 * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
 * (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or
 * `N :*=* foo :*= bar`. [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or
 * `bar :=* foo :*=* N`.
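 *
 * A small worked illustration (derived from the range construction below): if a node's outward bindings resolve to
 * 3, 1, and 2 edges respectively, the running sum yields an [[oPortMapping]] of `Seq((0, 3), (3, 4), (4, 6))`, i.e.
 * half-open `(start, end)` ranges over the node's resolved output edges.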
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// resolveStar relies on the node subclass to implement the resolution algorithm.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Sequence of this node's outward ports, before any port forwarding is applied.
 *
 * This should be called after all star bindings are resolved.
 *
 * Each element is: `j` Port index of this binding in the connected node's [[InwardNode.iPortMapping]]. `n` Instance
 * of the connected inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where
 * this connection was made in the source code.
 */
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of this node's inward ports, before any port forwarding is applied.
 *
 * This should be called after all star bindings are resolved.
 *
 * Each element is: `j` Port index of this binding in the connected node's [[OutwardNode.oPortMapping]]. `n` Instance
 * of the connected outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where
 * this connection was made in the source code.
 */
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
// Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
 * If you need to access the edges of a foreign node, use this method rather than `in`/`out` (which also create the bundles).
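 *
 * {{{
 * // sketch (illustrative; `someOtherNode` is hypothetical): inspect another node's negotiated
 * // inward edges without materialising its bundle wires, which `in`/`out` would do
 * val numInwardEdges = someOtherNode.edges.in.size
 * }}}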
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: Unconnected forwarded diplomatic signals are tied off to DontCare for compatibility.
// In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: Unconnected forwarded diplomatic signals are tied off to DontCare for compatibility.
// In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node's outputs to other nodes' inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections to this node's inputs from other nodes' outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
 * Accessors to the result of negotiation, to be used within [[LazyModuleImp]] code. Should only be used within
 * [[LazyModuleImp]] code or after its instantiation has completed.
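 *
 * A typical consumption pattern (a sketch; `node` is a hypothetical diplomatic node of the enclosing LazyModule):
 * {{{
 * lazy val module = new LazyModuleImp(this) {
 *   node.out.foreach { case (bundle, edge) =>
 *     // drive `bundle` here, using `edge` for the negotiated parameters
 *   }
 * }
 * }}}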
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
 * Accessors to the result of negotiation, to be used within [[LazyModuleImp]] code. Should only be used within
 * [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of another node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
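// Usage note (sketch): the public binding operators land here with `this` as the left operand, e.g.
// `x := y` becomes `x.bind(y, BIND_ONCE)`, while `x :*= y`, `x :=* y`, and `x :*=* y` arrive as
// BIND_STAR, BIND_QUERY, and BIND_FLEX respectively.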
/** Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph. */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
File UserYanker.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.amba.axi4
import chisel3._
import chisel3.util.{Queue, QueueIO, UIntToOH}
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp}
import freechips.rocketchip.util.BundleMap
/** This adapter prunes all user bit fields of the echo type from request messages,
* storing them in queues and echoing them back when matching response messages are received.
*
* It also optionally rate limits the number of transactions that can be in flight simultaneously
* per FIFO domain / A[W|R]ID.
*
* @param capMaxFlight is an optional maximum number of transactions that can be in flight per A[W|R]ID.
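 *
 * A usage sketch (illustrative; the surrounding node names are hypothetical):
 * {{{
 * // strip echo fields and cap each AW/AR ID at 4 outstanding transactions
 * axi4slave.node := AXI4UserYanker(Some(4)) := axi4master.node
 * }}}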
*/
class AXI4UserYanker(capMaxFlight: Option[Int] = None)(implicit p: Parameters) extends LazyModule
{
val node = AXI4AdapterNode(
masterFn = { mp => mp.copy(
masters = mp.masters.map { m => m.copy(
maxFlight = (m.maxFlight, capMaxFlight) match {
case (Some(x), Some(y)) => Some(x min y)
case (Some(x), None) => Some(x)
case (None, Some(y)) => Some(y)
case (None, None) => None })},
echoFields = Nil)},
slaveFn = { sp => sp })
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
// Which fields are we stripping?
val echoFields = edgeIn.master.echoFields
val need_bypass = edgeOut.slave.minLatency < 1
edgeOut.master.masters.foreach { m =>
require (m.maxFlight.isDefined, "UserYanker needs a flight cap on each ID")
}
def queue(id: Int) = {
val depth = edgeOut.master.masters.find(_.id.contains(id)).flatMap(_.maxFlight).getOrElse(0)
if (depth == 0) {
Wire(new QueueIO(BundleMap(echoFields), 1)) // unused ID => undefined value
} else {
Module(new Queue(BundleMap(echoFields), depth, flow=need_bypass)).io
}
}
val rqueues = Seq.tabulate(edgeIn.master.endId) { i => queue(i) }
val wqueues = Seq.tabulate(edgeIn.master.endId) { i => queue(i) }
val arid = in.ar.bits.id
val ar_ready = VecInit(rqueues.map(_.enq.ready))(arid)
in .ar.ready := out.ar.ready && ar_ready
out.ar.valid := in .ar.valid && ar_ready
Connectable.waiveUnmatched(out.ar.bits, in.ar.bits) match {
case (lhs, rhs) => lhs :<= rhs
}
val rid = out.r.bits.id
val r_valid = VecInit(rqueues.map(_.deq.valid))(rid)
val r_bits = VecInit(rqueues.map(_.deq.bits))(rid)
assert (!out.r.valid || r_valid) // Q must be ready faster than the response
Connectable.waiveUnmatched(in.r, out.r) match {
case (lhs, rhs) => lhs :<>= rhs
}
in.r.bits.echo :<= r_bits
val arsel = UIntToOH(arid, edgeIn.master.endId).asBools
val rsel = UIntToOH(rid, edgeIn.master.endId).asBools
(rqueues zip (arsel zip rsel)) foreach { case (q, (ar, r)) =>
q.deq.ready := out.r .valid && in .r .ready && r && out.r.bits.last
q.deq.valid := DontCare
q.deq.bits := DontCare
q.enq.valid := in .ar.valid && out.ar.ready && ar
q.enq.ready := DontCare
q.enq.bits :<>= in.ar.bits.echo
q.count := DontCare
}
val awid = in.aw.bits.id
val aw_ready = VecInit(wqueues.map(_.enq.ready))(awid)
in .aw.ready := out.aw.ready && aw_ready
out.aw.valid := in .aw.valid && aw_ready
Connectable.waiveUnmatched(out.aw.bits, in.aw.bits) match {
case (lhs, rhs) => lhs :<>= rhs
}
val bid = out.b.bits.id
val b_valid = VecInit(wqueues.map(_.deq.valid))(bid)
val b_bits = VecInit(wqueues.map(_.deq.bits))(bid)
assert (!out.b.valid || b_valid) // Q must be ready faster than the response
Connectable.waiveUnmatched(in.b, out.b) match {
case (lhs, rhs) => lhs :<>= rhs
}
in.b.bits.echo :<>= b_bits
val awsel = UIntToOH(awid, edgeIn.master.endId).asBools
val bsel = UIntToOH(bid, edgeIn.master.endId).asBools
(wqueues zip (awsel zip bsel)) foreach { case (q, (aw, b)) =>
q.deq.ready := out.b .valid && in .b .ready && b
q.deq.valid := DontCare
q.deq.bits := DontCare
q.enq.valid := in .aw.valid && out.aw.ready && aw
q.enq.ready := DontCare
q.enq.bits :<>= in.aw.bits.echo
q.count := DontCare
}
out.w :<>= in.w
}
}
}
object AXI4UserYanker
{
def apply(capMaxFlight: Option[Int] = None)(implicit p: Parameters): AXI4Node =
{
val axi4yank = LazyModule(new AXI4UserYanker(capMaxFlight))
axi4yank.node
}
}
| module TLInterconnectCoupler_fbus_from_port_named_slave_port_axi4( // @[LazyModuleImp.scala:138:7]
input clock, // @[LazyModuleImp.scala:138:7]
input reset, // @[LazyModuleImp.scala:138:7]
output auto_axi4index_in_aw_ready, // @[LazyModuleImp.scala:107:25]
input auto_axi4index_in_aw_valid, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_axi4index_in_aw_bits_id, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_axi4index_in_aw_bits_addr, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_axi4index_in_aw_bits_len, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_axi4index_in_aw_bits_size, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_axi4index_in_aw_bits_burst, // @[LazyModuleImp.scala:107:25]
input auto_axi4index_in_aw_bits_lock, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_axi4index_in_aw_bits_cache, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_axi4index_in_aw_bits_prot, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_axi4index_in_aw_bits_qos, // @[LazyModuleImp.scala:107:25]
output auto_axi4index_in_w_ready, // @[LazyModuleImp.scala:107:25]
input auto_axi4index_in_w_valid, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_axi4index_in_w_bits_data, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_axi4index_in_w_bits_strb, // @[LazyModuleImp.scala:107:25]
input auto_axi4index_in_w_bits_last, // @[LazyModuleImp.scala:107:25]
input auto_axi4index_in_b_ready, // @[LazyModuleImp.scala:107:25]
output auto_axi4index_in_b_valid, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_axi4index_in_b_bits_id, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_axi4index_in_b_bits_resp, // @[LazyModuleImp.scala:107:25]
output auto_axi4index_in_ar_ready, // @[LazyModuleImp.scala:107:25]
input auto_axi4index_in_ar_valid, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_axi4index_in_ar_bits_id, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_axi4index_in_ar_bits_addr, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_axi4index_in_ar_bits_len, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_axi4index_in_ar_bits_size, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_axi4index_in_ar_bits_burst, // @[LazyModuleImp.scala:107:25]
input auto_axi4index_in_ar_bits_lock, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_axi4index_in_ar_bits_cache, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_axi4index_in_ar_bits_prot, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_axi4index_in_ar_bits_qos, // @[LazyModuleImp.scala:107:25]
input auto_axi4index_in_r_ready, // @[LazyModuleImp.scala:107:25]
output auto_axi4index_in_r_valid, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_axi4index_in_r_bits_id, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_axi4index_in_r_bits_data, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_axi4index_in_r_bits_resp, // @[LazyModuleImp.scala:107:25]
output auto_axi4index_in_r_bits_last, // @[LazyModuleImp.scala:107:25]
input auto_tl_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_tl_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_tl_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_tl_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_tl_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_tl_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_tl_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output auto_tl_out_a_bits_user_amba_prot_bufferable, // @[LazyModuleImp.scala:107:25]
output auto_tl_out_a_bits_user_amba_prot_modifiable, // @[LazyModuleImp.scala:107:25]
output auto_tl_out_a_bits_user_amba_prot_readalloc, // @[LazyModuleImp.scala:107:25]
output auto_tl_out_a_bits_user_amba_prot_writealloc, // @[LazyModuleImp.scala:107:25]
output auto_tl_out_a_bits_user_amba_prot_privileged, // @[LazyModuleImp.scala:107:25]
output auto_tl_out_a_bits_user_amba_prot_secure, // @[LazyModuleImp.scala:107:25]
output auto_tl_out_a_bits_user_amba_prot_fetch, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_tl_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_tl_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_tl_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_tl_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_tl_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_tl_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_tl_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_tl_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_tl_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input auto_tl_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_tl_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_tl_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_tl_out_d_bits_corrupt // @[LazyModuleImp.scala:107:25]
);
wire widget_auto_anon_out_d_valid; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire [63:0] widget_auto_anon_out_d_bits_data; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_d_bits_denied; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_d_bits_sink; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_out_d_bits_source; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_out_d_bits_size; // @[WidthWidget.scala:27:9]
wire [1:0] widget_auto_anon_out_d_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_out_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_a_ready; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_d_ready; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_a_valid; // @[WidthWidget.scala:27:9]
wire [63:0] widget_auto_anon_in_a_bits_data; // @[WidthWidget.scala:27:9]
wire [7:0] widget_auto_anon_in_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [31:0] widget_auto_anon_in_a_bits_address; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_in_a_bits_source; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_in_a_bits_size; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_in_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_a_bits_user_amba_prot_fetch; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_a_bits_user_amba_prot_secure; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_a_bits_user_amba_prot_privileged; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_a_bits_user_amba_prot_writealloc; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_a_bits_user_amba_prot_readalloc; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_a_bits_user_amba_prot_modifiable; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_a_bits_user_amba_prot_bufferable; // @[WidthWidget.scala:27:9]
wire _axi4index_auto_out_aw_valid; // @[IdIndexer.scala:108:31]
wire _axi4index_auto_out_aw_bits_id; // @[IdIndexer.scala:108:31]
wire [31:0] _axi4index_auto_out_aw_bits_addr; // @[IdIndexer.scala:108:31]
wire [7:0] _axi4index_auto_out_aw_bits_len; // @[IdIndexer.scala:108:31]
wire [2:0] _axi4index_auto_out_aw_bits_size; // @[IdIndexer.scala:108:31]
wire [1:0] _axi4index_auto_out_aw_bits_burst; // @[IdIndexer.scala:108:31]
wire _axi4index_auto_out_aw_bits_lock; // @[IdIndexer.scala:108:31]
wire [3:0] _axi4index_auto_out_aw_bits_cache; // @[IdIndexer.scala:108:31]
wire [2:0] _axi4index_auto_out_aw_bits_prot; // @[IdIndexer.scala:108:31]
wire [3:0] _axi4index_auto_out_aw_bits_qos; // @[IdIndexer.scala:108:31]
wire [6:0] _axi4index_auto_out_aw_bits_echo_extra_id; // @[IdIndexer.scala:108:31]
wire _axi4index_auto_out_w_valid; // @[IdIndexer.scala:108:31]
wire [63:0] _axi4index_auto_out_w_bits_data; // @[IdIndexer.scala:108:31]
wire [7:0] _axi4index_auto_out_w_bits_strb; // @[IdIndexer.scala:108:31]
wire _axi4index_auto_out_w_bits_last; // @[IdIndexer.scala:108:31]
wire _axi4index_auto_out_b_ready; // @[IdIndexer.scala:108:31]
wire _axi4index_auto_out_ar_valid; // @[IdIndexer.scala:108:31]
wire _axi4index_auto_out_ar_bits_id; // @[IdIndexer.scala:108:31]
wire [31:0] _axi4index_auto_out_ar_bits_addr; // @[IdIndexer.scala:108:31]
wire [7:0] _axi4index_auto_out_ar_bits_len; // @[IdIndexer.scala:108:31]
wire [2:0] _axi4index_auto_out_ar_bits_size; // @[IdIndexer.scala:108:31]
wire [1:0] _axi4index_auto_out_ar_bits_burst; // @[IdIndexer.scala:108:31]
wire _axi4index_auto_out_ar_bits_lock; // @[IdIndexer.scala:108:31]
wire [3:0] _axi4index_auto_out_ar_bits_cache; // @[IdIndexer.scala:108:31]
wire [2:0] _axi4index_auto_out_ar_bits_prot; // @[IdIndexer.scala:108:31]
wire [3:0] _axi4index_auto_out_ar_bits_qos; // @[IdIndexer.scala:108:31]
wire [6:0] _axi4index_auto_out_ar_bits_echo_extra_id; // @[IdIndexer.scala:108:31]
wire _axi4index_auto_out_r_ready; // @[IdIndexer.scala:108:31]
wire _axi4frag_auto_in_aw_ready; // @[Fragmenter.scala:224:30]
wire _axi4frag_auto_in_w_ready; // @[Fragmenter.scala:224:30]
wire _axi4frag_auto_in_b_valid; // @[Fragmenter.scala:224:30]
wire _axi4frag_auto_in_b_bits_id; // @[Fragmenter.scala:224:30]
wire [1:0] _axi4frag_auto_in_b_bits_resp; // @[Fragmenter.scala:224:30]
wire [6:0] _axi4frag_auto_in_b_bits_echo_extra_id; // @[Fragmenter.scala:224:30]
wire _axi4frag_auto_in_ar_ready; // @[Fragmenter.scala:224:30]
wire _axi4frag_auto_in_r_valid; // @[Fragmenter.scala:224:30]
wire _axi4frag_auto_in_r_bits_id; // @[Fragmenter.scala:224:30]
wire [63:0] _axi4frag_auto_in_r_bits_data; // @[Fragmenter.scala:224:30]
wire [1:0] _axi4frag_auto_in_r_bits_resp; // @[Fragmenter.scala:224:30]
wire [6:0] _axi4frag_auto_in_r_bits_echo_extra_id; // @[Fragmenter.scala:224:30]
wire _axi4frag_auto_in_r_bits_last; // @[Fragmenter.scala:224:30]
wire _axi4frag_auto_out_aw_valid; // @[Fragmenter.scala:224:30]
wire _axi4frag_auto_out_aw_bits_id; // @[Fragmenter.scala:224:30]
wire [31:0] _axi4frag_auto_out_aw_bits_addr; // @[Fragmenter.scala:224:30]
wire [7:0] _axi4frag_auto_out_aw_bits_len; // @[Fragmenter.scala:224:30]
wire [2:0] _axi4frag_auto_out_aw_bits_size; // @[Fragmenter.scala:224:30]
wire [1:0] _axi4frag_auto_out_aw_bits_burst; // @[Fragmenter.scala:224:30]
wire _axi4frag_auto_out_aw_bits_lock; // @[Fragmenter.scala:224:30]
wire [3:0] _axi4frag_auto_out_aw_bits_cache; // @[Fragmenter.scala:224:30]
wire [2:0] _axi4frag_auto_out_aw_bits_prot; // @[Fragmenter.scala:224:30]
wire [3:0] _axi4frag_auto_out_aw_bits_qos; // @[Fragmenter.scala:224:30]
wire [6:0] _axi4frag_auto_out_aw_bits_echo_extra_id; // @[Fragmenter.scala:224:30]
wire _axi4frag_auto_out_aw_bits_echo_real_last; // @[Fragmenter.scala:224:30]
wire _axi4frag_auto_out_w_valid; // @[Fragmenter.scala:224:30]
wire [63:0] _axi4frag_auto_out_w_bits_data; // @[Fragmenter.scala:224:30]
wire [7:0] _axi4frag_auto_out_w_bits_strb; // @[Fragmenter.scala:224:30]
wire _axi4frag_auto_out_w_bits_last; // @[Fragmenter.scala:224:30]
wire _axi4frag_auto_out_b_ready; // @[Fragmenter.scala:224:30]
wire _axi4frag_auto_out_ar_valid; // @[Fragmenter.scala:224:30]
wire _axi4frag_auto_out_ar_bits_id; // @[Fragmenter.scala:224:30]
wire [31:0] _axi4frag_auto_out_ar_bits_addr; // @[Fragmenter.scala:224:30]
wire [7:0] _axi4frag_auto_out_ar_bits_len; // @[Fragmenter.scala:224:30]
wire [2:0] _axi4frag_auto_out_ar_bits_size; // @[Fragmenter.scala:224:30]
wire [1:0] _axi4frag_auto_out_ar_bits_burst; // @[Fragmenter.scala:224:30]
wire _axi4frag_auto_out_ar_bits_lock; // @[Fragmenter.scala:224:30]
wire [3:0] _axi4frag_auto_out_ar_bits_cache; // @[Fragmenter.scala:224:30]
wire [2:0] _axi4frag_auto_out_ar_bits_prot; // @[Fragmenter.scala:224:30]
wire [3:0] _axi4frag_auto_out_ar_bits_qos; // @[Fragmenter.scala:224:30]
wire [6:0] _axi4frag_auto_out_ar_bits_echo_extra_id; // @[Fragmenter.scala:224:30]
wire _axi4frag_auto_out_ar_bits_echo_real_last; // @[Fragmenter.scala:224:30]
wire _axi4frag_auto_out_r_ready; // @[Fragmenter.scala:224:30]
wire _axi4yank_auto_in_aw_ready; // @[UserYanker.scala:125:30]
wire _axi4yank_auto_in_w_ready; // @[UserYanker.scala:125:30]
wire _axi4yank_auto_in_b_valid; // @[UserYanker.scala:125:30]
wire _axi4yank_auto_in_b_bits_id; // @[UserYanker.scala:125:30]
wire [1:0] _axi4yank_auto_in_b_bits_resp; // @[UserYanker.scala:125:30]
wire [6:0] _axi4yank_auto_in_b_bits_echo_extra_id; // @[UserYanker.scala:125:30]
wire _axi4yank_auto_in_b_bits_echo_real_last; // @[UserYanker.scala:125:30]
wire _axi4yank_auto_in_ar_ready; // @[UserYanker.scala:125:30]
wire _axi4yank_auto_in_r_valid; // @[UserYanker.scala:125:30]
wire _axi4yank_auto_in_r_bits_id; // @[UserYanker.scala:125:30]
wire [63:0] _axi4yank_auto_in_r_bits_data; // @[UserYanker.scala:125:30]
wire [1:0] _axi4yank_auto_in_r_bits_resp; // @[UserYanker.scala:125:30]
wire [6:0] _axi4yank_auto_in_r_bits_echo_extra_id; // @[UserYanker.scala:125:30]
wire _axi4yank_auto_in_r_bits_echo_real_last; // @[UserYanker.scala:125:30]
wire _axi4yank_auto_in_r_bits_last; // @[UserYanker.scala:125:30]
wire _axi4yank_auto_out_aw_valid; // @[UserYanker.scala:125:30]
wire _axi4yank_auto_out_aw_bits_id; // @[UserYanker.scala:125:30]
wire [31:0] _axi4yank_auto_out_aw_bits_addr; // @[UserYanker.scala:125:30]
wire [7:0] _axi4yank_auto_out_aw_bits_len; // @[UserYanker.scala:125:30]
wire [2:0] _axi4yank_auto_out_aw_bits_size; // @[UserYanker.scala:125:30]
wire [1:0] _axi4yank_auto_out_aw_bits_burst; // @[UserYanker.scala:125:30]
wire _axi4yank_auto_out_aw_bits_lock; // @[UserYanker.scala:125:30]
wire [3:0] _axi4yank_auto_out_aw_bits_cache; // @[UserYanker.scala:125:30]
wire [2:0] _axi4yank_auto_out_aw_bits_prot; // @[UserYanker.scala:125:30]
wire [3:0] _axi4yank_auto_out_aw_bits_qos; // @[UserYanker.scala:125:30]
wire _axi4yank_auto_out_w_valid; // @[UserYanker.scala:125:30]
wire [63:0] _axi4yank_auto_out_w_bits_data; // @[UserYanker.scala:125:30]
wire [7:0] _axi4yank_auto_out_w_bits_strb; // @[UserYanker.scala:125:30]
wire _axi4yank_auto_out_w_bits_last; // @[UserYanker.scala:125:30]
wire _axi4yank_auto_out_b_ready; // @[UserYanker.scala:125:30]
wire _axi4yank_auto_out_ar_valid; // @[UserYanker.scala:125:30]
wire _axi4yank_auto_out_ar_bits_id; // @[UserYanker.scala:125:30]
wire [31:0] _axi4yank_auto_out_ar_bits_addr; // @[UserYanker.scala:125:30]
wire [7:0] _axi4yank_auto_out_ar_bits_len; // @[UserYanker.scala:125:30]
wire [2:0] _axi4yank_auto_out_ar_bits_size; // @[UserYanker.scala:125:30]
wire [1:0] _axi4yank_auto_out_ar_bits_burst; // @[UserYanker.scala:125:30]
wire _axi4yank_auto_out_ar_bits_lock; // @[UserYanker.scala:125:30]
wire [3:0] _axi4yank_auto_out_ar_bits_cache; // @[UserYanker.scala:125:30]
wire [2:0] _axi4yank_auto_out_ar_bits_prot; // @[UserYanker.scala:125:30]
wire [3:0] _axi4yank_auto_out_ar_bits_qos; // @[UserYanker.scala:125:30]
wire _axi4yank_auto_out_r_ready; // @[UserYanker.scala:125:30]
wire _axi42tl_auto_in_aw_ready; // @[ToTL.scala:238:29]
wire _axi42tl_auto_in_w_ready; // @[ToTL.scala:238:29]
wire _axi42tl_auto_in_b_valid; // @[ToTL.scala:238:29]
wire _axi42tl_auto_in_b_bits_id; // @[ToTL.scala:238:29]
wire [1:0] _axi42tl_auto_in_b_bits_resp; // @[ToTL.scala:238:29]
wire _axi42tl_auto_in_ar_ready; // @[ToTL.scala:238:29]
wire _axi42tl_auto_in_r_valid; // @[ToTL.scala:238:29]
wire _axi42tl_auto_in_r_bits_id; // @[ToTL.scala:238:29]
wire [63:0] _axi42tl_auto_in_r_bits_data; // @[ToTL.scala:238:29]
wire [1:0] _axi42tl_auto_in_r_bits_resp; // @[ToTL.scala:238:29]
wire _axi42tl_auto_in_r_bits_last; // @[ToTL.scala:238:29]
wire _fixer_auto_anon_out_a_valid; // @[FIFOFixer.scala:152:27]
wire [2:0] _fixer_auto_anon_out_a_bits_opcode; // @[FIFOFixer.scala:152:27]
wire [3:0] _fixer_auto_anon_out_a_bits_size; // @[FIFOFixer.scala:152:27]
wire [3:0] _fixer_auto_anon_out_a_bits_source; // @[FIFOFixer.scala:152:27]
wire [31:0] _fixer_auto_anon_out_a_bits_address; // @[FIFOFixer.scala:152:27]
wire _fixer_auto_anon_out_a_bits_user_amba_prot_bufferable; // @[FIFOFixer.scala:152:27]
wire _fixer_auto_anon_out_a_bits_user_amba_prot_modifiable; // @[FIFOFixer.scala:152:27]
wire _fixer_auto_anon_out_a_bits_user_amba_prot_readalloc; // @[FIFOFixer.scala:152:27]
wire _fixer_auto_anon_out_a_bits_user_amba_prot_writealloc; // @[FIFOFixer.scala:152:27]
wire _fixer_auto_anon_out_a_bits_user_amba_prot_privileged; // @[FIFOFixer.scala:152:27]
wire _fixer_auto_anon_out_a_bits_user_amba_prot_secure; // @[FIFOFixer.scala:152:27]
wire _fixer_auto_anon_out_a_bits_user_amba_prot_fetch; // @[FIFOFixer.scala:152:27]
wire [7:0] _fixer_auto_anon_out_a_bits_mask; // @[FIFOFixer.scala:152:27]
wire [63:0] _fixer_auto_anon_out_a_bits_data; // @[FIFOFixer.scala:152:27]
wire _fixer_auto_anon_out_d_ready; // @[FIFOFixer.scala:152:27]
wire _buffer_auto_in_a_ready; // @[Buffer.scala:75:28]
wire _buffer_auto_in_d_valid; // @[Buffer.scala:75:28]
wire [2:0] _buffer_auto_in_d_bits_opcode; // @[Buffer.scala:75:28]
wire [1:0] _buffer_auto_in_d_bits_param; // @[Buffer.scala:75:28]
wire [3:0] _buffer_auto_in_d_bits_size; // @[Buffer.scala:75:28]
wire [3:0] _buffer_auto_in_d_bits_source; // @[Buffer.scala:75:28]
wire _buffer_auto_in_d_bits_sink; // @[Buffer.scala:75:28]
wire _buffer_auto_in_d_bits_denied; // @[Buffer.scala:75:28]
wire [63:0] _buffer_auto_in_d_bits_data; // @[Buffer.scala:75:28]
wire _buffer_auto_in_d_bits_corrupt; // @[Buffer.scala:75:28]
wire auto_axi4index_in_aw_valid_0 = auto_axi4index_in_aw_valid; // @[LazyModuleImp.scala:138:7]
wire [7:0] auto_axi4index_in_aw_bits_id_0 = auto_axi4index_in_aw_bits_id; // @[LazyModuleImp.scala:138:7]
wire [31:0] auto_axi4index_in_aw_bits_addr_0 = auto_axi4index_in_aw_bits_addr; // @[LazyModuleImp.scala:138:7]
wire [7:0] auto_axi4index_in_aw_bits_len_0 = auto_axi4index_in_aw_bits_len; // @[LazyModuleImp.scala:138:7]
wire [2:0] auto_axi4index_in_aw_bits_size_0 = auto_axi4index_in_aw_bits_size; // @[LazyModuleImp.scala:138:7]
wire [1:0] auto_axi4index_in_aw_bits_burst_0 = auto_axi4index_in_aw_bits_burst; // @[LazyModuleImp.scala:138:7]
wire auto_axi4index_in_aw_bits_lock_0 = auto_axi4index_in_aw_bits_lock; // @[LazyModuleImp.scala:138:7]
wire [3:0] auto_axi4index_in_aw_bits_cache_0 = auto_axi4index_in_aw_bits_cache; // @[LazyModuleImp.scala:138:7]
wire [2:0] auto_axi4index_in_aw_bits_prot_0 = auto_axi4index_in_aw_bits_prot; // @[LazyModuleImp.scala:138:7]
wire [3:0] auto_axi4index_in_aw_bits_qos_0 = auto_axi4index_in_aw_bits_qos; // @[LazyModuleImp.scala:138:7]
wire auto_axi4index_in_w_valid_0 = auto_axi4index_in_w_valid; // @[LazyModuleImp.scala:138:7]
wire [63:0] auto_axi4index_in_w_bits_data_0 = auto_axi4index_in_w_bits_data; // @[LazyModuleImp.scala:138:7]
wire [7:0] auto_axi4index_in_w_bits_strb_0 = auto_axi4index_in_w_bits_strb; // @[LazyModuleImp.scala:138:7]
wire auto_axi4index_in_w_bits_last_0 = auto_axi4index_in_w_bits_last; // @[LazyModuleImp.scala:138:7]
wire auto_axi4index_in_b_ready_0 = auto_axi4index_in_b_ready; // @[LazyModuleImp.scala:138:7]
wire auto_axi4index_in_ar_valid_0 = auto_axi4index_in_ar_valid; // @[LazyModuleImp.scala:138:7]
wire [7:0] auto_axi4index_in_ar_bits_id_0 = auto_axi4index_in_ar_bits_id; // @[LazyModuleImp.scala:138:7]
wire [31:0] auto_axi4index_in_ar_bits_addr_0 = auto_axi4index_in_ar_bits_addr; // @[LazyModuleImp.scala:138:7]
wire [7:0] auto_axi4index_in_ar_bits_len_0 = auto_axi4index_in_ar_bits_len; // @[LazyModuleImp.scala:138:7]
wire [2:0] auto_axi4index_in_ar_bits_size_0 = auto_axi4index_in_ar_bits_size; // @[LazyModuleImp.scala:138:7]
wire [1:0] auto_axi4index_in_ar_bits_burst_0 = auto_axi4index_in_ar_bits_burst; // @[LazyModuleImp.scala:138:7]
wire auto_axi4index_in_ar_bits_lock_0 = auto_axi4index_in_ar_bits_lock; // @[LazyModuleImp.scala:138:7]
wire [3:0] auto_axi4index_in_ar_bits_cache_0 = auto_axi4index_in_ar_bits_cache; // @[LazyModuleImp.scala:138:7]
wire [2:0] auto_axi4index_in_ar_bits_prot_0 = auto_axi4index_in_ar_bits_prot; // @[LazyModuleImp.scala:138:7]
wire [3:0] auto_axi4index_in_ar_bits_qos_0 = auto_axi4index_in_ar_bits_qos; // @[LazyModuleImp.scala:138:7]
wire auto_axi4index_in_r_ready_0 = auto_axi4index_in_r_ready; // @[LazyModuleImp.scala:138:7]
wire auto_tl_out_a_ready_0 = auto_tl_out_a_ready; // @[LazyModuleImp.scala:138:7]
wire auto_tl_out_d_valid_0 = auto_tl_out_d_valid; // @[LazyModuleImp.scala:138:7]
wire [2:0] auto_tl_out_d_bits_opcode_0 = auto_tl_out_d_bits_opcode; // @[LazyModuleImp.scala:138:7]
wire [1:0] auto_tl_out_d_bits_param_0 = auto_tl_out_d_bits_param; // @[LazyModuleImp.scala:138:7]
wire [3:0] auto_tl_out_d_bits_size_0 = auto_tl_out_d_bits_size; // @[LazyModuleImp.scala:138:7]
wire [3:0] auto_tl_out_d_bits_source_0 = auto_tl_out_d_bits_source; // @[LazyModuleImp.scala:138:7]
wire auto_tl_out_d_bits_sink_0 = auto_tl_out_d_bits_sink; // @[LazyModuleImp.scala:138:7]
wire auto_tl_out_d_bits_denied_0 = auto_tl_out_d_bits_denied; // @[LazyModuleImp.scala:138:7]
wire [63:0] auto_tl_out_d_bits_data_0 = auto_tl_out_d_bits_data; // @[LazyModuleImp.scala:138:7]
wire auto_tl_out_d_bits_corrupt_0 = auto_tl_out_d_bits_corrupt; // @[LazyModuleImp.scala:138:7]
wire widget_auto_anon_in_a_bits_corrupt = 1'h0; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_a_bits_corrupt = 1'h0; // @[WidthWidget.scala:27:9]
wire widget_anonOut_a_bits_corrupt = 1'h0; // @[WidthWidget.scala:27:9]
wire widget_anonIn_a_bits_corrupt = 1'h0; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_in_a_bits_param = 3'h0; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_out_a_bits_param = 3'h0; // @[WidthWidget.scala:27:9]
wire [2:0] widget_anonOut_a_bits_param = 3'h0; // @[WidthWidget.scala:27:9]
wire [2:0] widget_anonIn_a_bits_param = 3'h0; // @[WidthWidget.scala:27:9]
wire tlOut_a_ready = auto_tl_out_a_ready_0; // @[MixedNode.scala:542:17]
wire tlOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] tlOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] tlOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] tlOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [3:0] tlOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] tlOut_a_bits_address; // @[MixedNode.scala:542:17]
wire tlOut_a_bits_user_amba_prot_bufferable; // @[MixedNode.scala:542:17]
wire tlOut_a_bits_user_amba_prot_modifiable; // @[MixedNode.scala:542:17]
wire tlOut_a_bits_user_amba_prot_readalloc; // @[MixedNode.scala:542:17]
wire tlOut_a_bits_user_amba_prot_writealloc; // @[MixedNode.scala:542:17]
wire tlOut_a_bits_user_amba_prot_privileged; // @[MixedNode.scala:542:17]
wire tlOut_a_bits_user_amba_prot_secure; // @[MixedNode.scala:542:17]
wire tlOut_a_bits_user_amba_prot_fetch; // @[MixedNode.scala:542:17]
wire [7:0] tlOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] tlOut_a_bits_data; // @[MixedNode.scala:542:17]
wire tlOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire tlOut_d_ready; // @[MixedNode.scala:542:17]
wire tlOut_d_valid = auto_tl_out_d_valid_0; // @[MixedNode.scala:542:17]
wire [2:0] tlOut_d_bits_opcode = auto_tl_out_d_bits_opcode_0; // @[MixedNode.scala:542:17]
wire [1:0] tlOut_d_bits_param = auto_tl_out_d_bits_param_0; // @[MixedNode.scala:542:17]
wire [3:0] tlOut_d_bits_size = auto_tl_out_d_bits_size_0; // @[MixedNode.scala:542:17]
wire [3:0] tlOut_d_bits_source = auto_tl_out_d_bits_source_0; // @[MixedNode.scala:542:17]
wire tlOut_d_bits_sink = auto_tl_out_d_bits_sink_0; // @[MixedNode.scala:542:17]
wire tlOut_d_bits_denied = auto_tl_out_d_bits_denied_0; // @[MixedNode.scala:542:17]
wire [63:0] tlOut_d_bits_data = auto_tl_out_d_bits_data_0; // @[MixedNode.scala:542:17]
wire tlOut_d_bits_corrupt = auto_tl_out_d_bits_corrupt_0; // @[MixedNode.scala:542:17]
wire auto_axi4index_in_aw_ready_0; // @[LazyModuleImp.scala:138:7]
wire auto_axi4index_in_w_ready_0; // @[LazyModuleImp.scala:138:7]
wire [7:0] auto_axi4index_in_b_bits_id_0; // @[LazyModuleImp.scala:138:7]
wire [1:0] auto_axi4index_in_b_bits_resp_0; // @[LazyModuleImp.scala:138:7]
wire auto_axi4index_in_b_valid_0; // @[LazyModuleImp.scala:138:7]
wire auto_axi4index_in_ar_ready_0; // @[LazyModuleImp.scala:138:7]
wire [7:0] auto_axi4index_in_r_bits_id_0; // @[LazyModuleImp.scala:138:7]
wire [63:0] auto_axi4index_in_r_bits_data_0; // @[LazyModuleImp.scala:138:7]
wire [1:0] auto_axi4index_in_r_bits_resp_0; // @[LazyModuleImp.scala:138:7]
wire auto_axi4index_in_r_bits_last_0; // @[LazyModuleImp.scala:138:7]
wire auto_axi4index_in_r_valid_0; // @[LazyModuleImp.scala:138:7]
wire auto_tl_out_a_bits_user_amba_prot_bufferable_0; // @[LazyModuleImp.scala:138:7]
wire auto_tl_out_a_bits_user_amba_prot_modifiable_0; // @[LazyModuleImp.scala:138:7]
wire auto_tl_out_a_bits_user_amba_prot_readalloc_0; // @[LazyModuleImp.scala:138:7]
wire auto_tl_out_a_bits_user_amba_prot_writealloc_0; // @[LazyModuleImp.scala:138:7]
wire auto_tl_out_a_bits_user_amba_prot_privileged_0; // @[LazyModuleImp.scala:138:7]
wire auto_tl_out_a_bits_user_amba_prot_secure_0; // @[LazyModuleImp.scala:138:7]
wire auto_tl_out_a_bits_user_amba_prot_fetch_0; // @[LazyModuleImp.scala:138:7]
wire [2:0] auto_tl_out_a_bits_opcode_0; // @[LazyModuleImp.scala:138:7]
wire [2:0] auto_tl_out_a_bits_param_0; // @[LazyModuleImp.scala:138:7]
wire [3:0] auto_tl_out_a_bits_size_0; // @[LazyModuleImp.scala:138:7]
wire [3:0] auto_tl_out_a_bits_source_0; // @[LazyModuleImp.scala:138:7]
wire [31:0] auto_tl_out_a_bits_address_0; // @[LazyModuleImp.scala:138:7]
wire [7:0] auto_tl_out_a_bits_mask_0; // @[LazyModuleImp.scala:138:7]
wire [63:0] auto_tl_out_a_bits_data_0; // @[LazyModuleImp.scala:138:7]
wire auto_tl_out_a_bits_corrupt_0; // @[LazyModuleImp.scala:138:7]
wire auto_tl_out_a_valid_0; // @[LazyModuleImp.scala:138:7]
wire auto_tl_out_d_ready_0; // @[LazyModuleImp.scala:138:7]
wire widget_anonIn_a_ready; // @[MixedNode.scala:551:17]
wire widget_anonIn_a_valid = widget_auto_anon_in_a_valid; // @[WidthWidget.scala:27:9]
wire [2:0] widget_anonIn_a_bits_opcode = widget_auto_anon_in_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire [3:0] widget_anonIn_a_bits_size = widget_auto_anon_in_a_bits_size; // @[WidthWidget.scala:27:9]
wire [3:0] widget_anonIn_a_bits_source = widget_auto_anon_in_a_bits_source; // @[WidthWidget.scala:27:9]
wire [31:0] widget_anonIn_a_bits_address = widget_auto_anon_in_a_bits_address; // @[WidthWidget.scala:27:9]
wire widget_anonIn_a_bits_user_amba_prot_bufferable = widget_auto_anon_in_a_bits_user_amba_prot_bufferable; // @[WidthWidget.scala:27:9]
wire widget_anonIn_a_bits_user_amba_prot_modifiable = widget_auto_anon_in_a_bits_user_amba_prot_modifiable; // @[WidthWidget.scala:27:9]
wire widget_anonIn_a_bits_user_amba_prot_readalloc = widget_auto_anon_in_a_bits_user_amba_prot_readalloc; // @[WidthWidget.scala:27:9]
wire widget_anonIn_a_bits_user_amba_prot_writealloc = widget_auto_anon_in_a_bits_user_amba_prot_writealloc; // @[WidthWidget.scala:27:9]
wire widget_anonIn_a_bits_user_amba_prot_privileged = widget_auto_anon_in_a_bits_user_amba_prot_privileged; // @[WidthWidget.scala:27:9]
wire widget_anonIn_a_bits_user_amba_prot_secure = widget_auto_anon_in_a_bits_user_amba_prot_secure; // @[WidthWidget.scala:27:9]
wire widget_anonIn_a_bits_user_amba_prot_fetch = widget_auto_anon_in_a_bits_user_amba_prot_fetch; // @[WidthWidget.scala:27:9]
wire [7:0] widget_anonIn_a_bits_mask = widget_auto_anon_in_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [63:0] widget_anonIn_a_bits_data = widget_auto_anon_in_a_bits_data; // @[WidthWidget.scala:27:9]
wire widget_anonIn_d_ready = widget_auto_anon_in_d_ready; // @[WidthWidget.scala:27:9]
wire widget_anonIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] widget_anonIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] widget_anonIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [3:0] widget_anonIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [3:0] widget_anonIn_d_bits_source; // @[MixedNode.scala:551:17]
wire widget_anonIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire widget_anonIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] widget_anonIn_d_bits_data; // @[MixedNode.scala:551:17]
wire widget_anonIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire widget_anonOut_a_ready = widget_auto_anon_out_a_ready; // @[WidthWidget.scala:27:9]
wire widget_anonOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] widget_anonOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [3:0] widget_anonOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [3:0] widget_anonOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] widget_anonOut_a_bits_address; // @[MixedNode.scala:542:17]
wire widget_anonOut_a_bits_user_amba_prot_bufferable; // @[MixedNode.scala:542:17]
wire widget_anonOut_a_bits_user_amba_prot_modifiable; // @[MixedNode.scala:542:17]
wire widget_anonOut_a_bits_user_amba_prot_readalloc; // @[MixedNode.scala:542:17]
wire widget_anonOut_a_bits_user_amba_prot_writealloc; // @[MixedNode.scala:542:17]
wire widget_anonOut_a_bits_user_amba_prot_privileged; // @[MixedNode.scala:542:17]
wire widget_anonOut_a_bits_user_amba_prot_secure; // @[MixedNode.scala:542:17]
wire widget_anonOut_a_bits_user_amba_prot_fetch; // @[MixedNode.scala:542:17]
wire [7:0] widget_anonOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] widget_anonOut_a_bits_data; // @[MixedNode.scala:542:17]
wire widget_anonOut_d_ready; // @[MixedNode.scala:542:17]
wire widget_anonOut_d_valid = widget_auto_anon_out_d_valid; // @[WidthWidget.scala:27:9]
wire [2:0] widget_anonOut_d_bits_opcode = widget_auto_anon_out_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire [1:0] widget_anonOut_d_bits_param = widget_auto_anon_out_d_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] widget_anonOut_d_bits_size = widget_auto_anon_out_d_bits_size; // @[WidthWidget.scala:27:9]
wire [3:0] widget_anonOut_d_bits_source = widget_auto_anon_out_d_bits_source; // @[WidthWidget.scala:27:9]
wire widget_anonOut_d_bits_sink = widget_auto_anon_out_d_bits_sink; // @[WidthWidget.scala:27:9]
wire widget_anonOut_d_bits_denied = widget_auto_anon_out_d_bits_denied; // @[WidthWidget.scala:27:9]
wire [63:0] widget_anonOut_d_bits_data = widget_auto_anon_out_d_bits_data; // @[WidthWidget.scala:27:9]
wire widget_anonOut_d_bits_corrupt = widget_auto_anon_out_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_a_ready; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_in_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire [1:0] widget_auto_anon_in_d_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_in_d_bits_size; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_in_d_bits_source; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_d_bits_sink; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_d_bits_denied; // @[WidthWidget.scala:27:9]
wire [63:0] widget_auto_anon_in_d_bits_data; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_d_valid; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_a_bits_user_amba_prot_bufferable; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_a_bits_user_amba_prot_modifiable; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_a_bits_user_amba_prot_readalloc; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_a_bits_user_amba_prot_writealloc; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_a_bits_user_amba_prot_privileged; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_a_bits_user_amba_prot_secure; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_a_bits_user_amba_prot_fetch; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_out_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_out_a_bits_size; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_out_a_bits_source; // @[WidthWidget.scala:27:9]
wire [31:0] widget_auto_anon_out_a_bits_address; // @[WidthWidget.scala:27:9]
wire [7:0] widget_auto_anon_out_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [63:0] widget_auto_anon_out_a_bits_data; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_a_valid; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_d_ready; // @[WidthWidget.scala:27:9]
assign widget_anonIn_a_ready = widget_anonOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
assign widget_auto_anon_out_a_valid = widget_anonOut_a_valid; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_opcode = widget_anonOut_a_bits_opcode; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_size = widget_anonOut_a_bits_size; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_source = widget_anonOut_a_bits_source; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_address = widget_anonOut_a_bits_address; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_user_amba_prot_bufferable = widget_anonOut_a_bits_user_amba_prot_bufferable; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_user_amba_prot_modifiable = widget_anonOut_a_bits_user_amba_prot_modifiable; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_user_amba_prot_readalloc = widget_anonOut_a_bits_user_amba_prot_readalloc; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_user_amba_prot_writealloc = widget_anonOut_a_bits_user_amba_prot_writealloc; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_user_amba_prot_privileged = widget_anonOut_a_bits_user_amba_prot_privileged; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_user_amba_prot_secure = widget_anonOut_a_bits_user_amba_prot_secure; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_user_amba_prot_fetch = widget_anonOut_a_bits_user_amba_prot_fetch; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_mask = widget_anonOut_a_bits_mask; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_data = widget_anonOut_a_bits_data; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_d_ready = widget_anonOut_d_ready; // @[WidthWidget.scala:27:9]
assign widget_anonIn_d_valid = widget_anonOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_opcode = widget_anonOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_param = widget_anonOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_size = widget_anonOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_source = widget_anonOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_sink = widget_anonOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_denied = widget_anonOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_data = widget_anonOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_corrupt = widget_anonOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign widget_auto_anon_in_a_ready = widget_anonIn_a_ready; // @[WidthWidget.scala:27:9]
assign widget_anonOut_a_valid = widget_anonIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_opcode = widget_anonIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_size = widget_anonIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_source = widget_anonIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_address = widget_anonIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_user_amba_prot_bufferable = widget_anonIn_a_bits_user_amba_prot_bufferable; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_user_amba_prot_modifiable = widget_anonIn_a_bits_user_amba_prot_modifiable; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_user_amba_prot_readalloc = widget_anonIn_a_bits_user_amba_prot_readalloc; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_user_amba_prot_writealloc = widget_anonIn_a_bits_user_amba_prot_writealloc; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_user_amba_prot_privileged = widget_anonIn_a_bits_user_amba_prot_privileged; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_user_amba_prot_secure = widget_anonIn_a_bits_user_amba_prot_secure; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_user_amba_prot_fetch = widget_anonIn_a_bits_user_amba_prot_fetch; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_mask = widget_anonIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_data = widget_anonIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_d_ready = widget_anonIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign widget_auto_anon_in_d_valid = widget_anonIn_d_valid; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_opcode = widget_anonIn_d_bits_opcode; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_param = widget_anonIn_d_bits_param; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_size = widget_anonIn_d_bits_size; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_source = widget_anonIn_d_bits_source; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_sink = widget_anonIn_d_bits_sink; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_denied = widget_anonIn_d_bits_denied; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_data = widget_anonIn_d_bits_data; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_corrupt = widget_anonIn_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire tlIn_a_ready = tlOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
wire tlIn_a_valid; // @[MixedNode.scala:551:17]
assign auto_tl_out_a_valid_0 = tlOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] tlIn_a_bits_opcode; // @[MixedNode.scala:551:17]
assign auto_tl_out_a_bits_opcode_0 = tlOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] tlIn_a_bits_param; // @[MixedNode.scala:551:17]
assign auto_tl_out_a_bits_param_0 = tlOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] tlIn_a_bits_size; // @[MixedNode.scala:551:17]
assign auto_tl_out_a_bits_size_0 = tlOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [3:0] tlIn_a_bits_source; // @[MixedNode.scala:551:17]
assign auto_tl_out_a_bits_source_0 = tlOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] tlIn_a_bits_address; // @[MixedNode.scala:551:17]
assign auto_tl_out_a_bits_address_0 = tlOut_a_bits_address; // @[MixedNode.scala:542:17]
wire tlIn_a_bits_user_amba_prot_bufferable; // @[MixedNode.scala:551:17]
assign auto_tl_out_a_bits_user_amba_prot_bufferable_0 = tlOut_a_bits_user_amba_prot_bufferable; // @[MixedNode.scala:542:17]
wire tlIn_a_bits_user_amba_prot_modifiable; // @[MixedNode.scala:551:17]
assign auto_tl_out_a_bits_user_amba_prot_modifiable_0 = tlOut_a_bits_user_amba_prot_modifiable; // @[MixedNode.scala:542:17]
wire tlIn_a_bits_user_amba_prot_readalloc; // @[MixedNode.scala:551:17]
assign auto_tl_out_a_bits_user_amba_prot_readalloc_0 = tlOut_a_bits_user_amba_prot_readalloc; // @[MixedNode.scala:542:17]
wire tlIn_a_bits_user_amba_prot_writealloc; // @[MixedNode.scala:551:17]
assign auto_tl_out_a_bits_user_amba_prot_writealloc_0 = tlOut_a_bits_user_amba_prot_writealloc; // @[MixedNode.scala:542:17]
wire tlIn_a_bits_user_amba_prot_privileged; // @[MixedNode.scala:551:17]
assign auto_tl_out_a_bits_user_amba_prot_privileged_0 = tlOut_a_bits_user_amba_prot_privileged; // @[MixedNode.scala:542:17]
wire tlIn_a_bits_user_amba_prot_secure; // @[MixedNode.scala:551:17]
assign auto_tl_out_a_bits_user_amba_prot_secure_0 = tlOut_a_bits_user_amba_prot_secure; // @[MixedNode.scala:542:17]
wire tlIn_a_bits_user_amba_prot_fetch; // @[MixedNode.scala:551:17]
assign auto_tl_out_a_bits_user_amba_prot_fetch_0 = tlOut_a_bits_user_amba_prot_fetch; // @[MixedNode.scala:542:17]
wire [7:0] tlIn_a_bits_mask; // @[MixedNode.scala:551:17]
assign auto_tl_out_a_bits_mask_0 = tlOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] tlIn_a_bits_data; // @[MixedNode.scala:551:17]
assign auto_tl_out_a_bits_data_0 = tlOut_a_bits_data; // @[MixedNode.scala:542:17]
wire tlIn_a_bits_corrupt; // @[MixedNode.scala:551:17]
assign auto_tl_out_a_bits_corrupt_0 = tlOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire tlIn_d_ready; // @[MixedNode.scala:551:17]
assign auto_tl_out_d_ready_0 = tlOut_d_ready; // @[MixedNode.scala:542:17]
wire tlIn_d_valid = tlOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
wire [2:0] tlIn_d_bits_opcode = tlOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
wire [1:0] tlIn_d_bits_param = tlOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
wire [3:0] tlIn_d_bits_size = tlOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
wire [3:0] tlIn_d_bits_source = tlOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
wire tlIn_d_bits_sink = tlOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
wire tlIn_d_bits_denied = tlOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
wire [63:0] tlIn_d_bits_data = tlOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
wire tlIn_d_bits_corrupt = tlOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign tlOut_a_valid = tlIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign tlOut_a_bits_opcode = tlIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign tlOut_a_bits_param = tlIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign tlOut_a_bits_size = tlIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign tlOut_a_bits_source = tlIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign tlOut_a_bits_address = tlIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign tlOut_a_bits_user_amba_prot_bufferable = tlIn_a_bits_user_amba_prot_bufferable; // @[MixedNode.scala:542:17, :551:17]
assign tlOut_a_bits_user_amba_prot_modifiable = tlIn_a_bits_user_amba_prot_modifiable; // @[MixedNode.scala:542:17, :551:17]
assign tlOut_a_bits_user_amba_prot_readalloc = tlIn_a_bits_user_amba_prot_readalloc; // @[MixedNode.scala:542:17, :551:17]
assign tlOut_a_bits_user_amba_prot_writealloc = tlIn_a_bits_user_amba_prot_writealloc; // @[MixedNode.scala:542:17, :551:17]
assign tlOut_a_bits_user_amba_prot_privileged = tlIn_a_bits_user_amba_prot_privileged; // @[MixedNode.scala:542:17, :551:17]
assign tlOut_a_bits_user_amba_prot_secure = tlIn_a_bits_user_amba_prot_secure; // @[MixedNode.scala:542:17, :551:17]
assign tlOut_a_bits_user_amba_prot_fetch = tlIn_a_bits_user_amba_prot_fetch; // @[MixedNode.scala:542:17, :551:17]
assign tlOut_a_bits_mask = tlIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign tlOut_a_bits_data = tlIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign tlOut_a_bits_corrupt = tlIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign tlOut_d_ready = tlIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
TLBuffer_a32d64s4k1z4u buffer ( // @[Buffer.scala:75:28]
.clock (clock),
.reset (reset),
.auto_in_a_ready (_buffer_auto_in_a_ready),
.auto_in_a_valid (_fixer_auto_anon_out_a_valid), // @[FIFOFixer.scala:152:27]
.auto_in_a_bits_opcode (_fixer_auto_anon_out_a_bits_opcode), // @[FIFOFixer.scala:152:27]
.auto_in_a_bits_size (_fixer_auto_anon_out_a_bits_size), // @[FIFOFixer.scala:152:27]
.auto_in_a_bits_source (_fixer_auto_anon_out_a_bits_source), // @[FIFOFixer.scala:152:27]
.auto_in_a_bits_address (_fixer_auto_anon_out_a_bits_address), // @[FIFOFixer.scala:152:27]
.auto_in_a_bits_user_amba_prot_bufferable (_fixer_auto_anon_out_a_bits_user_amba_prot_bufferable), // @[FIFOFixer.scala:152:27]
.auto_in_a_bits_user_amba_prot_modifiable (_fixer_auto_anon_out_a_bits_user_amba_prot_modifiable), // @[FIFOFixer.scala:152:27]
.auto_in_a_bits_user_amba_prot_readalloc (_fixer_auto_anon_out_a_bits_user_amba_prot_readalloc), // @[FIFOFixer.scala:152:27]
.auto_in_a_bits_user_amba_prot_writealloc (_fixer_auto_anon_out_a_bits_user_amba_prot_writealloc), // @[FIFOFixer.scala:152:27]
.auto_in_a_bits_user_amba_prot_privileged (_fixer_auto_anon_out_a_bits_user_amba_prot_privileged), // @[FIFOFixer.scala:152:27]
.auto_in_a_bits_user_amba_prot_secure (_fixer_auto_anon_out_a_bits_user_amba_prot_secure), // @[FIFOFixer.scala:152:27]
.auto_in_a_bits_user_amba_prot_fetch (_fixer_auto_anon_out_a_bits_user_amba_prot_fetch), // @[FIFOFixer.scala:152:27]
.auto_in_a_bits_mask (_fixer_auto_anon_out_a_bits_mask), // @[FIFOFixer.scala:152:27]
.auto_in_a_bits_data (_fixer_auto_anon_out_a_bits_data), // @[FIFOFixer.scala:152:27]
.auto_in_d_ready (_fixer_auto_anon_out_d_ready), // @[FIFOFixer.scala:152:27]
.auto_in_d_valid (_buffer_auto_in_d_valid),
.auto_in_d_bits_opcode (_buffer_auto_in_d_bits_opcode),
.auto_in_d_bits_param (_buffer_auto_in_d_bits_param),
.auto_in_d_bits_size (_buffer_auto_in_d_bits_size),
.auto_in_d_bits_source (_buffer_auto_in_d_bits_source),
.auto_in_d_bits_sink (_buffer_auto_in_d_bits_sink),
.auto_in_d_bits_denied (_buffer_auto_in_d_bits_denied),
.auto_in_d_bits_data (_buffer_auto_in_d_bits_data),
.auto_in_d_bits_corrupt (_buffer_auto_in_d_bits_corrupt),
.auto_out_a_ready (tlIn_a_ready), // @[MixedNode.scala:551:17]
.auto_out_a_valid (tlIn_a_valid),
.auto_out_a_bits_opcode (tlIn_a_bits_opcode),
.auto_out_a_bits_param (tlIn_a_bits_param),
.auto_out_a_bits_size (tlIn_a_bits_size),
.auto_out_a_bits_source (tlIn_a_bits_source),
.auto_out_a_bits_address (tlIn_a_bits_address),
.auto_out_a_bits_user_amba_prot_bufferable (tlIn_a_bits_user_amba_prot_bufferable),
.auto_out_a_bits_user_amba_prot_modifiable (tlIn_a_bits_user_amba_prot_modifiable),
.auto_out_a_bits_user_amba_prot_readalloc (tlIn_a_bits_user_amba_prot_readalloc),
.auto_out_a_bits_user_amba_prot_writealloc (tlIn_a_bits_user_amba_prot_writealloc),
.auto_out_a_bits_user_amba_prot_privileged (tlIn_a_bits_user_amba_prot_privileged),
.auto_out_a_bits_user_amba_prot_secure (tlIn_a_bits_user_amba_prot_secure),
.auto_out_a_bits_user_amba_prot_fetch (tlIn_a_bits_user_amba_prot_fetch),
.auto_out_a_bits_mask (tlIn_a_bits_mask),
.auto_out_a_bits_data (tlIn_a_bits_data),
.auto_out_a_bits_corrupt (tlIn_a_bits_corrupt),
.auto_out_d_ready (tlIn_d_ready),
.auto_out_d_valid (tlIn_d_valid), // @[MixedNode.scala:551:17]
.auto_out_d_bits_opcode (tlIn_d_bits_opcode), // @[MixedNode.scala:551:17]
.auto_out_d_bits_param (tlIn_d_bits_param), // @[MixedNode.scala:551:17]
.auto_out_d_bits_size (tlIn_d_bits_size), // @[MixedNode.scala:551:17]
.auto_out_d_bits_source (tlIn_d_bits_source), // @[MixedNode.scala:551:17]
.auto_out_d_bits_sink (tlIn_d_bits_sink), // @[MixedNode.scala:551:17]
.auto_out_d_bits_denied (tlIn_d_bits_denied), // @[MixedNode.scala:551:17]
.auto_out_d_bits_data (tlIn_d_bits_data), // @[MixedNode.scala:551:17]
.auto_out_d_bits_corrupt (tlIn_d_bits_corrupt) // @[MixedNode.scala:551:17]
); // @[Buffer.scala:75:28]
TLFIFOFixer_2 fixer ( // @[FIFOFixer.scala:152:27]
.clock (clock),
.reset (reset),
.auto_anon_in_a_ready (widget_auto_anon_out_a_ready),
.auto_anon_in_a_valid (widget_auto_anon_out_a_valid), // @[WidthWidget.scala:27:9]
.auto_anon_in_a_bits_opcode (widget_auto_anon_out_a_bits_opcode), // @[WidthWidget.scala:27:9]
.auto_anon_in_a_bits_size (widget_auto_anon_out_a_bits_size), // @[WidthWidget.scala:27:9]
.auto_anon_in_a_bits_source (widget_auto_anon_out_a_bits_source), // @[WidthWidget.scala:27:9]
.auto_anon_in_a_bits_address (widget_auto_anon_out_a_bits_address), // @[WidthWidget.scala:27:9]
.auto_anon_in_a_bits_user_amba_prot_bufferable (widget_auto_anon_out_a_bits_user_amba_prot_bufferable), // @[WidthWidget.scala:27:9]
.auto_anon_in_a_bits_user_amba_prot_modifiable (widget_auto_anon_out_a_bits_user_amba_prot_modifiable), // @[WidthWidget.scala:27:9]
.auto_anon_in_a_bits_user_amba_prot_readalloc (widget_auto_anon_out_a_bits_user_amba_prot_readalloc), // @[WidthWidget.scala:27:9]
.auto_anon_in_a_bits_user_amba_prot_writealloc (widget_auto_anon_out_a_bits_user_amba_prot_writealloc), // @[WidthWidget.scala:27:9]
.auto_anon_in_a_bits_user_amba_prot_privileged (widget_auto_anon_out_a_bits_user_amba_prot_privileged), // @[WidthWidget.scala:27:9]
.auto_anon_in_a_bits_user_amba_prot_secure (widget_auto_anon_out_a_bits_user_amba_prot_secure), // @[WidthWidget.scala:27:9]
.auto_anon_in_a_bits_user_amba_prot_fetch (widget_auto_anon_out_a_bits_user_amba_prot_fetch), // @[WidthWidget.scala:27:9]
.auto_anon_in_a_bits_mask (widget_auto_anon_out_a_bits_mask), // @[WidthWidget.scala:27:9]
.auto_anon_in_a_bits_data (widget_auto_anon_out_a_bits_data), // @[WidthWidget.scala:27:9]
.auto_anon_in_d_ready (widget_auto_anon_out_d_ready), // @[WidthWidget.scala:27:9]
.auto_anon_in_d_valid (widget_auto_anon_out_d_valid),
.auto_anon_in_d_bits_opcode (widget_auto_anon_out_d_bits_opcode),
.auto_anon_in_d_bits_param (widget_auto_anon_out_d_bits_param),
.auto_anon_in_d_bits_size (widget_auto_anon_out_d_bits_size),
.auto_anon_in_d_bits_source (widget_auto_anon_out_d_bits_source),
.auto_anon_in_d_bits_sink (widget_auto_anon_out_d_bits_sink),
.auto_anon_in_d_bits_denied (widget_auto_anon_out_d_bits_denied),
.auto_anon_in_d_bits_data (widget_auto_anon_out_d_bits_data),
.auto_anon_in_d_bits_corrupt (widget_auto_anon_out_d_bits_corrupt),
.auto_anon_out_a_ready (_buffer_auto_in_a_ready), // @[Buffer.scala:75:28]
.auto_anon_out_a_valid (_fixer_auto_anon_out_a_valid),
.auto_anon_out_a_bits_opcode (_fixer_auto_anon_out_a_bits_opcode),
.auto_anon_out_a_bits_size (_fixer_auto_anon_out_a_bits_size),
.auto_anon_out_a_bits_source (_fixer_auto_anon_out_a_bits_source),
.auto_anon_out_a_bits_address (_fixer_auto_anon_out_a_bits_address),
.auto_anon_out_a_bits_user_amba_prot_bufferable (_fixer_auto_anon_out_a_bits_user_amba_prot_bufferable),
.auto_anon_out_a_bits_user_amba_prot_modifiable (_fixer_auto_anon_out_a_bits_user_amba_prot_modifiable),
.auto_anon_out_a_bits_user_amba_prot_readalloc (_fixer_auto_anon_out_a_bits_user_amba_prot_readalloc),
.auto_anon_out_a_bits_user_amba_prot_writealloc (_fixer_auto_anon_out_a_bits_user_amba_prot_writealloc),
.auto_anon_out_a_bits_user_amba_prot_privileged (_fixer_auto_anon_out_a_bits_user_amba_prot_privileged),
.auto_anon_out_a_bits_user_amba_prot_secure (_fixer_auto_anon_out_a_bits_user_amba_prot_secure),
.auto_anon_out_a_bits_user_amba_prot_fetch (_fixer_auto_anon_out_a_bits_user_amba_prot_fetch),
.auto_anon_out_a_bits_mask (_fixer_auto_anon_out_a_bits_mask),
.auto_anon_out_a_bits_data (_fixer_auto_anon_out_a_bits_data),
.auto_anon_out_d_ready (_fixer_auto_anon_out_d_ready),
.auto_anon_out_d_valid (_buffer_auto_in_d_valid), // @[Buffer.scala:75:28]
.auto_anon_out_d_bits_opcode (_buffer_auto_in_d_bits_opcode), // @[Buffer.scala:75:28]
.auto_anon_out_d_bits_param (_buffer_auto_in_d_bits_param), // @[Buffer.scala:75:28]
.auto_anon_out_d_bits_size (_buffer_auto_in_d_bits_size), // @[Buffer.scala:75:28]
.auto_anon_out_d_bits_source (_buffer_auto_in_d_bits_source), // @[Buffer.scala:75:28]
.auto_anon_out_d_bits_sink (_buffer_auto_in_d_bits_sink), // @[Buffer.scala:75:28]
.auto_anon_out_d_bits_denied (_buffer_auto_in_d_bits_denied), // @[Buffer.scala:75:28]
.auto_anon_out_d_bits_data (_buffer_auto_in_d_bits_data), // @[Buffer.scala:75:28]
.auto_anon_out_d_bits_corrupt (_buffer_auto_in_d_bits_corrupt) // @[Buffer.scala:75:28]
); // @[FIFOFixer.scala:152:27]
AXI4ToTL axi42tl ( // @[ToTL.scala:238:29]
.clock (clock),
.reset (reset),
.auto_in_aw_ready (_axi42tl_auto_in_aw_ready),
.auto_in_aw_valid (_axi4yank_auto_out_aw_valid), // @[UserYanker.scala:125:30]
.auto_in_aw_bits_id (_axi4yank_auto_out_aw_bits_id), // @[UserYanker.scala:125:30]
.auto_in_aw_bits_addr (_axi4yank_auto_out_aw_bits_addr), // @[UserYanker.scala:125:30]
.auto_in_aw_bits_len (_axi4yank_auto_out_aw_bits_len), // @[UserYanker.scala:125:30]
.auto_in_aw_bits_size (_axi4yank_auto_out_aw_bits_size), // @[UserYanker.scala:125:30]
.auto_in_aw_bits_burst (_axi4yank_auto_out_aw_bits_burst), // @[UserYanker.scala:125:30]
.auto_in_aw_bits_lock (_axi4yank_auto_out_aw_bits_lock), // @[UserYanker.scala:125:30]
.auto_in_aw_bits_cache (_axi4yank_auto_out_aw_bits_cache), // @[UserYanker.scala:125:30]
.auto_in_aw_bits_prot (_axi4yank_auto_out_aw_bits_prot), // @[UserYanker.scala:125:30]
.auto_in_aw_bits_qos (_axi4yank_auto_out_aw_bits_qos), // @[UserYanker.scala:125:30]
.auto_in_w_ready (_axi42tl_auto_in_w_ready),
.auto_in_w_valid (_axi4yank_auto_out_w_valid), // @[UserYanker.scala:125:30]
.auto_in_w_bits_data (_axi4yank_auto_out_w_bits_data), // @[UserYanker.scala:125:30]
.auto_in_w_bits_strb (_axi4yank_auto_out_w_bits_strb), // @[UserYanker.scala:125:30]
.auto_in_w_bits_last (_axi4yank_auto_out_w_bits_last), // @[UserYanker.scala:125:30]
.auto_in_b_ready (_axi4yank_auto_out_b_ready), // @[UserYanker.scala:125:30]
.auto_in_b_valid (_axi42tl_auto_in_b_valid),
.auto_in_b_bits_id (_axi42tl_auto_in_b_bits_id),
.auto_in_b_bits_resp (_axi42tl_auto_in_b_bits_resp),
.auto_in_ar_ready (_axi42tl_auto_in_ar_ready),
.auto_in_ar_valid (_axi4yank_auto_out_ar_valid), // @[UserYanker.scala:125:30]
.auto_in_ar_bits_id (_axi4yank_auto_out_ar_bits_id), // @[UserYanker.scala:125:30]
.auto_in_ar_bits_addr (_axi4yank_auto_out_ar_bits_addr), // @[UserYanker.scala:125:30]
.auto_in_ar_bits_len (_axi4yank_auto_out_ar_bits_len), // @[UserYanker.scala:125:30]
.auto_in_ar_bits_size (_axi4yank_auto_out_ar_bits_size), // @[UserYanker.scala:125:30]
.auto_in_ar_bits_burst (_axi4yank_auto_out_ar_bits_burst), // @[UserYanker.scala:125:30]
.auto_in_ar_bits_lock (_axi4yank_auto_out_ar_bits_lock), // @[UserYanker.scala:125:30]
.auto_in_ar_bits_cache (_axi4yank_auto_out_ar_bits_cache), // @[UserYanker.scala:125:30]
.auto_in_ar_bits_prot (_axi4yank_auto_out_ar_bits_prot), // @[UserYanker.scala:125:30]
.auto_in_ar_bits_qos (_axi4yank_auto_out_ar_bits_qos), // @[UserYanker.scala:125:30]
.auto_in_r_ready (_axi4yank_auto_out_r_ready), // @[UserYanker.scala:125:30]
.auto_in_r_valid (_axi42tl_auto_in_r_valid),
.auto_in_r_bits_id (_axi42tl_auto_in_r_bits_id),
.auto_in_r_bits_data (_axi42tl_auto_in_r_bits_data),
.auto_in_r_bits_resp (_axi42tl_auto_in_r_bits_resp),
.auto_in_r_bits_last (_axi42tl_auto_in_r_bits_last),
.auto_out_a_ready (widget_auto_anon_in_a_ready), // @[WidthWidget.scala:27:9]
.auto_out_a_valid (widget_auto_anon_in_a_valid),
.auto_out_a_bits_opcode (widget_auto_anon_in_a_bits_opcode),
.auto_out_a_bits_size (widget_auto_anon_in_a_bits_size),
.auto_out_a_bits_source (widget_auto_anon_in_a_bits_source),
.auto_out_a_bits_address (widget_auto_anon_in_a_bits_address),
.auto_out_a_bits_user_amba_prot_bufferable (widget_auto_anon_in_a_bits_user_amba_prot_bufferable),
.auto_out_a_bits_user_amba_prot_modifiable (widget_auto_anon_in_a_bits_user_amba_prot_modifiable),
.auto_out_a_bits_user_amba_prot_readalloc (widget_auto_anon_in_a_bits_user_amba_prot_readalloc),
.auto_out_a_bits_user_amba_prot_writealloc (widget_auto_anon_in_a_bits_user_amba_prot_writealloc),
.auto_out_a_bits_user_amba_prot_privileged (widget_auto_anon_in_a_bits_user_amba_prot_privileged),
.auto_out_a_bits_user_amba_prot_secure (widget_auto_anon_in_a_bits_user_amba_prot_secure),
.auto_out_a_bits_user_amba_prot_fetch (widget_auto_anon_in_a_bits_user_amba_prot_fetch),
.auto_out_a_bits_mask (widget_auto_anon_in_a_bits_mask),
.auto_out_a_bits_data (widget_auto_anon_in_a_bits_data),
.auto_out_d_ready (widget_auto_anon_in_d_ready),
.auto_out_d_valid (widget_auto_anon_in_d_valid), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_opcode (widget_auto_anon_in_d_bits_opcode), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_param (widget_auto_anon_in_d_bits_param), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_size (widget_auto_anon_in_d_bits_size), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_source (widget_auto_anon_in_d_bits_source), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_sink (widget_auto_anon_in_d_bits_sink), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_denied (widget_auto_anon_in_d_bits_denied), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_data (widget_auto_anon_in_d_bits_data), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_corrupt (widget_auto_anon_in_d_bits_corrupt) // @[WidthWidget.scala:27:9]
); // @[ToTL.scala:238:29]
AXI4UserYanker_1 axi4yank ( // @[UserYanker.scala:125:30]
.clock (clock),
.reset (reset),
.auto_in_aw_ready (_axi4yank_auto_in_aw_ready),
.auto_in_aw_valid (_axi4frag_auto_out_aw_valid), // @[Fragmenter.scala:224:30]
.auto_in_aw_bits_id (_axi4frag_auto_out_aw_bits_id), // @[Fragmenter.scala:224:30]
.auto_in_aw_bits_addr (_axi4frag_auto_out_aw_bits_addr), // @[Fragmenter.scala:224:30]
.auto_in_aw_bits_len (_axi4frag_auto_out_aw_bits_len), // @[Fragmenter.scala:224:30]
.auto_in_aw_bits_size (_axi4frag_auto_out_aw_bits_size), // @[Fragmenter.scala:224:30]
.auto_in_aw_bits_burst (_axi4frag_auto_out_aw_bits_burst), // @[Fragmenter.scala:224:30]
.auto_in_aw_bits_lock (_axi4frag_auto_out_aw_bits_lock), // @[Fragmenter.scala:224:30]
.auto_in_aw_bits_cache (_axi4frag_auto_out_aw_bits_cache), // @[Fragmenter.scala:224:30]
.auto_in_aw_bits_prot (_axi4frag_auto_out_aw_bits_prot), // @[Fragmenter.scala:224:30]
.auto_in_aw_bits_qos (_axi4frag_auto_out_aw_bits_qos), // @[Fragmenter.scala:224:30]
.auto_in_aw_bits_echo_extra_id (_axi4frag_auto_out_aw_bits_echo_extra_id), // @[Fragmenter.scala:224:30]
.auto_in_aw_bits_echo_real_last (_axi4frag_auto_out_aw_bits_echo_real_last), // @[Fragmenter.scala:224:30]
.auto_in_w_ready (_axi4yank_auto_in_w_ready),
.auto_in_w_valid (_axi4frag_auto_out_w_valid), // @[Fragmenter.scala:224:30]
.auto_in_w_bits_data (_axi4frag_auto_out_w_bits_data), // @[Fragmenter.scala:224:30]
.auto_in_w_bits_strb (_axi4frag_auto_out_w_bits_strb), // @[Fragmenter.scala:224:30]
.auto_in_w_bits_last (_axi4frag_auto_out_w_bits_last), // @[Fragmenter.scala:224:30]
.auto_in_b_ready (_axi4frag_auto_out_b_ready), // @[Fragmenter.scala:224:30]
.auto_in_b_valid (_axi4yank_auto_in_b_valid),
.auto_in_b_bits_id (_axi4yank_auto_in_b_bits_id),
.auto_in_b_bits_resp (_axi4yank_auto_in_b_bits_resp),
.auto_in_b_bits_echo_extra_id (_axi4yank_auto_in_b_bits_echo_extra_id),
.auto_in_b_bits_echo_real_last (_axi4yank_auto_in_b_bits_echo_real_last),
.auto_in_ar_ready (_axi4yank_auto_in_ar_ready),
.auto_in_ar_valid (_axi4frag_auto_out_ar_valid), // @[Fragmenter.scala:224:30]
.auto_in_ar_bits_id (_axi4frag_auto_out_ar_bits_id), // @[Fragmenter.scala:224:30]
.auto_in_ar_bits_addr (_axi4frag_auto_out_ar_bits_addr), // @[Fragmenter.scala:224:30]
.auto_in_ar_bits_len (_axi4frag_auto_out_ar_bits_len), // @[Fragmenter.scala:224:30]
.auto_in_ar_bits_size (_axi4frag_auto_out_ar_bits_size), // @[Fragmenter.scala:224:30]
.auto_in_ar_bits_burst (_axi4frag_auto_out_ar_bits_burst), // @[Fragmenter.scala:224:30]
.auto_in_ar_bits_lock (_axi4frag_auto_out_ar_bits_lock), // @[Fragmenter.scala:224:30]
.auto_in_ar_bits_cache (_axi4frag_auto_out_ar_bits_cache), // @[Fragmenter.scala:224:30]
.auto_in_ar_bits_prot (_axi4frag_auto_out_ar_bits_prot), // @[Fragmenter.scala:224:30]
.auto_in_ar_bits_qos (_axi4frag_auto_out_ar_bits_qos), // @[Fragmenter.scala:224:30]
.auto_in_ar_bits_echo_extra_id (_axi4frag_auto_out_ar_bits_echo_extra_id), // @[Fragmenter.scala:224:30]
.auto_in_ar_bits_echo_real_last (_axi4frag_auto_out_ar_bits_echo_real_last), // @[Fragmenter.scala:224:30]
.auto_in_r_ready (_axi4frag_auto_out_r_ready), // @[Fragmenter.scala:224:30]
.auto_in_r_valid (_axi4yank_auto_in_r_valid),
.auto_in_r_bits_id (_axi4yank_auto_in_r_bits_id),
.auto_in_r_bits_data (_axi4yank_auto_in_r_bits_data),
.auto_in_r_bits_resp (_axi4yank_auto_in_r_bits_resp),
.auto_in_r_bits_echo_extra_id (_axi4yank_auto_in_r_bits_echo_extra_id),
.auto_in_r_bits_echo_real_last (_axi4yank_auto_in_r_bits_echo_real_last),
.auto_in_r_bits_last (_axi4yank_auto_in_r_bits_last),
.auto_out_aw_ready (_axi42tl_auto_in_aw_ready), // @[ToTL.scala:238:29]
.auto_out_aw_valid (_axi4yank_auto_out_aw_valid),
.auto_out_aw_bits_id (_axi4yank_auto_out_aw_bits_id),
.auto_out_aw_bits_addr (_axi4yank_auto_out_aw_bits_addr),
.auto_out_aw_bits_len (_axi4yank_auto_out_aw_bits_len),
.auto_out_aw_bits_size (_axi4yank_auto_out_aw_bits_size),
.auto_out_aw_bits_burst (_axi4yank_auto_out_aw_bits_burst),
.auto_out_aw_bits_lock (_axi4yank_auto_out_aw_bits_lock),
.auto_out_aw_bits_cache (_axi4yank_auto_out_aw_bits_cache),
.auto_out_aw_bits_prot (_axi4yank_auto_out_aw_bits_prot),
.auto_out_aw_bits_qos (_axi4yank_auto_out_aw_bits_qos),
.auto_out_w_ready (_axi42tl_auto_in_w_ready), // @[ToTL.scala:238:29]
.auto_out_w_valid (_axi4yank_auto_out_w_valid),
.auto_out_w_bits_data (_axi4yank_auto_out_w_bits_data),
.auto_out_w_bits_strb (_axi4yank_auto_out_w_bits_strb),
.auto_out_w_bits_last (_axi4yank_auto_out_w_bits_last),
.auto_out_b_ready (_axi4yank_auto_out_b_ready),
.auto_out_b_valid (_axi42tl_auto_in_b_valid), // @[ToTL.scala:238:29]
.auto_out_b_bits_id (_axi42tl_auto_in_b_bits_id), // @[ToTL.scala:238:29]
.auto_out_b_bits_resp (_axi42tl_auto_in_b_bits_resp), // @[ToTL.scala:238:29]
.auto_out_ar_ready (_axi42tl_auto_in_ar_ready), // @[ToTL.scala:238:29]
.auto_out_ar_valid (_axi4yank_auto_out_ar_valid),
.auto_out_ar_bits_id (_axi4yank_auto_out_ar_bits_id),
.auto_out_ar_bits_addr (_axi4yank_auto_out_ar_bits_addr),
.auto_out_ar_bits_len (_axi4yank_auto_out_ar_bits_len),
.auto_out_ar_bits_size (_axi4yank_auto_out_ar_bits_size),
.auto_out_ar_bits_burst (_axi4yank_auto_out_ar_bits_burst),
.auto_out_ar_bits_lock (_axi4yank_auto_out_ar_bits_lock),
.auto_out_ar_bits_cache (_axi4yank_auto_out_ar_bits_cache),
.auto_out_ar_bits_prot (_axi4yank_auto_out_ar_bits_prot),
.auto_out_ar_bits_qos (_axi4yank_auto_out_ar_bits_qos),
.auto_out_r_ready (_axi4yank_auto_out_r_ready),
.auto_out_r_valid (_axi42tl_auto_in_r_valid), // @[ToTL.scala:238:29]
.auto_out_r_bits_id (_axi42tl_auto_in_r_bits_id), // @[ToTL.scala:238:29]
.auto_out_r_bits_data (_axi42tl_auto_in_r_bits_data), // @[ToTL.scala:238:29]
.auto_out_r_bits_resp (_axi42tl_auto_in_r_bits_resp), // @[ToTL.scala:238:29]
.auto_out_r_bits_last (_axi42tl_auto_in_r_bits_last) // @[ToTL.scala:238:29]
); // @[UserYanker.scala:125:30]
AXI4Fragmenter axi4frag ( // @[Fragmenter.scala:224:30]
.clock (clock),
.reset (reset),
.auto_in_aw_ready (_axi4frag_auto_in_aw_ready),
.auto_in_aw_valid (_axi4index_auto_out_aw_valid), // @[IdIndexer.scala:108:31]
.auto_in_aw_bits_id (_axi4index_auto_out_aw_bits_id), // @[IdIndexer.scala:108:31]
.auto_in_aw_bits_addr (_axi4index_auto_out_aw_bits_addr), // @[IdIndexer.scala:108:31]
.auto_in_aw_bits_len (_axi4index_auto_out_aw_bits_len), // @[IdIndexer.scala:108:31]
.auto_in_aw_bits_size (_axi4index_auto_out_aw_bits_size), // @[IdIndexer.scala:108:31]
.auto_in_aw_bits_burst (_axi4index_auto_out_aw_bits_burst), // @[IdIndexer.scala:108:31]
.auto_in_aw_bits_lock (_axi4index_auto_out_aw_bits_lock), // @[IdIndexer.scala:108:31]
.auto_in_aw_bits_cache (_axi4index_auto_out_aw_bits_cache), // @[IdIndexer.scala:108:31]
.auto_in_aw_bits_prot (_axi4index_auto_out_aw_bits_prot), // @[IdIndexer.scala:108:31]
.auto_in_aw_bits_qos (_axi4index_auto_out_aw_bits_qos), // @[IdIndexer.scala:108:31]
.auto_in_aw_bits_echo_extra_id (_axi4index_auto_out_aw_bits_echo_extra_id), // @[IdIndexer.scala:108:31]
.auto_in_w_ready (_axi4frag_auto_in_w_ready),
.auto_in_w_valid (_axi4index_auto_out_w_valid), // @[IdIndexer.scala:108:31]
.auto_in_w_bits_data (_axi4index_auto_out_w_bits_data), // @[IdIndexer.scala:108:31]
.auto_in_w_bits_strb (_axi4index_auto_out_w_bits_strb), // @[IdIndexer.scala:108:31]
.auto_in_w_bits_last (_axi4index_auto_out_w_bits_last), // @[IdIndexer.scala:108:31]
.auto_in_b_ready (_axi4index_auto_out_b_ready), // @[IdIndexer.scala:108:31]
.auto_in_b_valid (_axi4frag_auto_in_b_valid),
.auto_in_b_bits_id (_axi4frag_auto_in_b_bits_id),
.auto_in_b_bits_resp (_axi4frag_auto_in_b_bits_resp),
.auto_in_b_bits_echo_extra_id (_axi4frag_auto_in_b_bits_echo_extra_id),
.auto_in_ar_ready (_axi4frag_auto_in_ar_ready),
.auto_in_ar_valid (_axi4index_auto_out_ar_valid), // @[IdIndexer.scala:108:31]
.auto_in_ar_bits_id (_axi4index_auto_out_ar_bits_id), // @[IdIndexer.scala:108:31]
.auto_in_ar_bits_addr (_axi4index_auto_out_ar_bits_addr), // @[IdIndexer.scala:108:31]
.auto_in_ar_bits_len (_axi4index_auto_out_ar_bits_len), // @[IdIndexer.scala:108:31]
.auto_in_ar_bits_size (_axi4index_auto_out_ar_bits_size), // @[IdIndexer.scala:108:31]
.auto_in_ar_bits_burst (_axi4index_auto_out_ar_bits_burst), // @[IdIndexer.scala:108:31]
.auto_in_ar_bits_lock (_axi4index_auto_out_ar_bits_lock), // @[IdIndexer.scala:108:31]
.auto_in_ar_bits_cache (_axi4index_auto_out_ar_bits_cache), // @[IdIndexer.scala:108:31]
.auto_in_ar_bits_prot (_axi4index_auto_out_ar_bits_prot), // @[IdIndexer.scala:108:31]
.auto_in_ar_bits_qos (_axi4index_auto_out_ar_bits_qos), // @[IdIndexer.scala:108:31]
.auto_in_ar_bits_echo_extra_id (_axi4index_auto_out_ar_bits_echo_extra_id), // @[IdIndexer.scala:108:31]
.auto_in_r_ready (_axi4index_auto_out_r_ready), // @[IdIndexer.scala:108:31]
.auto_in_r_valid (_axi4frag_auto_in_r_valid),
.auto_in_r_bits_id (_axi4frag_auto_in_r_bits_id),
.auto_in_r_bits_data (_axi4frag_auto_in_r_bits_data),
.auto_in_r_bits_resp (_axi4frag_auto_in_r_bits_resp),
.auto_in_r_bits_echo_extra_id (_axi4frag_auto_in_r_bits_echo_extra_id),
.auto_in_r_bits_last (_axi4frag_auto_in_r_bits_last),
.auto_out_aw_ready (_axi4yank_auto_in_aw_ready), // @[UserYanker.scala:125:30]
.auto_out_aw_valid (_axi4frag_auto_out_aw_valid),
.auto_out_aw_bits_id (_axi4frag_auto_out_aw_bits_id),
.auto_out_aw_bits_addr (_axi4frag_auto_out_aw_bits_addr),
.auto_out_aw_bits_len (_axi4frag_auto_out_aw_bits_len),
.auto_out_aw_bits_size (_axi4frag_auto_out_aw_bits_size),
.auto_out_aw_bits_burst (_axi4frag_auto_out_aw_bits_burst),
.auto_out_aw_bits_lock (_axi4frag_auto_out_aw_bits_lock),
.auto_out_aw_bits_cache (_axi4frag_auto_out_aw_bits_cache),
.auto_out_aw_bits_prot (_axi4frag_auto_out_aw_bits_prot),
.auto_out_aw_bits_qos (_axi4frag_auto_out_aw_bits_qos),
.auto_out_aw_bits_echo_extra_id (_axi4frag_auto_out_aw_bits_echo_extra_id),
.auto_out_aw_bits_echo_real_last (_axi4frag_auto_out_aw_bits_echo_real_last),
.auto_out_w_ready (_axi4yank_auto_in_w_ready), // @[UserYanker.scala:125:30]
.auto_out_w_valid (_axi4frag_auto_out_w_valid),
.auto_out_w_bits_data (_axi4frag_auto_out_w_bits_data),
.auto_out_w_bits_strb (_axi4frag_auto_out_w_bits_strb),
.auto_out_w_bits_last (_axi4frag_auto_out_w_bits_last),
.auto_out_b_ready (_axi4frag_auto_out_b_ready),
.auto_out_b_valid (_axi4yank_auto_in_b_valid), // @[UserYanker.scala:125:30]
.auto_out_b_bits_id (_axi4yank_auto_in_b_bits_id), // @[UserYanker.scala:125:30]
.auto_out_b_bits_resp (_axi4yank_auto_in_b_bits_resp), // @[UserYanker.scala:125:30]
.auto_out_b_bits_echo_extra_id (_axi4yank_auto_in_b_bits_echo_extra_id), // @[UserYanker.scala:125:30]
.auto_out_b_bits_echo_real_last (_axi4yank_auto_in_b_bits_echo_real_last), // @[UserYanker.scala:125:30]
.auto_out_ar_ready (_axi4yank_auto_in_ar_ready), // @[UserYanker.scala:125:30]
.auto_out_ar_valid (_axi4frag_auto_out_ar_valid),
.auto_out_ar_bits_id (_axi4frag_auto_out_ar_bits_id),
.auto_out_ar_bits_addr (_axi4frag_auto_out_ar_bits_addr),
.auto_out_ar_bits_len (_axi4frag_auto_out_ar_bits_len),
.auto_out_ar_bits_size (_axi4frag_auto_out_ar_bits_size),
.auto_out_ar_bits_burst (_axi4frag_auto_out_ar_bits_burst),
.auto_out_ar_bits_lock (_axi4frag_auto_out_ar_bits_lock),
.auto_out_ar_bits_cache (_axi4frag_auto_out_ar_bits_cache),
.auto_out_ar_bits_prot (_axi4frag_auto_out_ar_bits_prot),
.auto_out_ar_bits_qos (_axi4frag_auto_out_ar_bits_qos),
.auto_out_ar_bits_echo_extra_id (_axi4frag_auto_out_ar_bits_echo_extra_id),
.auto_out_ar_bits_echo_real_last (_axi4frag_auto_out_ar_bits_echo_real_last),
.auto_out_r_ready (_axi4frag_auto_out_r_ready),
.auto_out_r_valid (_axi4yank_auto_in_r_valid), // @[UserYanker.scala:125:30]
.auto_out_r_bits_id (_axi4yank_auto_in_r_bits_id), // @[UserYanker.scala:125:30]
.auto_out_r_bits_data (_axi4yank_auto_in_r_bits_data), // @[UserYanker.scala:125:30]
.auto_out_r_bits_resp (_axi4yank_auto_in_r_bits_resp), // @[UserYanker.scala:125:30]
.auto_out_r_bits_echo_extra_id (_axi4yank_auto_in_r_bits_echo_extra_id), // @[UserYanker.scala:125:30]
.auto_out_r_bits_echo_real_last (_axi4yank_auto_in_r_bits_echo_real_last), // @[UserYanker.scala:125:30]
.auto_out_r_bits_last (_axi4yank_auto_in_r_bits_last) // @[UserYanker.scala:125:30]
); // @[Fragmenter.scala:224:30]
AXI4IdIndexer_1 axi4index ( // @[IdIndexer.scala:108:31]
.clock (clock),
.reset (reset),
.auto_in_aw_ready (auto_axi4index_in_aw_ready_0),
.auto_in_aw_valid (auto_axi4index_in_aw_valid_0), // @[LazyModuleImp.scala:138:7]
.auto_in_aw_bits_id (auto_axi4index_in_aw_bits_id_0), // @[LazyModuleImp.scala:138:7]
.auto_in_aw_bits_addr (auto_axi4index_in_aw_bits_addr_0), // @[LazyModuleImp.scala:138:7]
.auto_in_aw_bits_len (auto_axi4index_in_aw_bits_len_0), // @[LazyModuleImp.scala:138:7]
.auto_in_aw_bits_size (auto_axi4index_in_aw_bits_size_0), // @[LazyModuleImp.scala:138:7]
.auto_in_aw_bits_burst (auto_axi4index_in_aw_bits_burst_0), // @[LazyModuleImp.scala:138:7]
.auto_in_aw_bits_lock (auto_axi4index_in_aw_bits_lock_0), // @[LazyModuleImp.scala:138:7]
.auto_in_aw_bits_cache (auto_axi4index_in_aw_bits_cache_0), // @[LazyModuleImp.scala:138:7]
.auto_in_aw_bits_prot (auto_axi4index_in_aw_bits_prot_0), // @[LazyModuleImp.scala:138:7]
.auto_in_aw_bits_qos (auto_axi4index_in_aw_bits_qos_0), // @[LazyModuleImp.scala:138:7]
.auto_in_w_ready (auto_axi4index_in_w_ready_0),
.auto_in_w_valid (auto_axi4index_in_w_valid_0), // @[LazyModuleImp.scala:138:7]
.auto_in_w_bits_data (auto_axi4index_in_w_bits_data_0), // @[LazyModuleImp.scala:138:7]
.auto_in_w_bits_strb (auto_axi4index_in_w_bits_strb_0), // @[LazyModuleImp.scala:138:7]
.auto_in_w_bits_last (auto_axi4index_in_w_bits_last_0), // @[LazyModuleImp.scala:138:7]
.auto_in_b_ready (auto_axi4index_in_b_ready_0), // @[LazyModuleImp.scala:138:7]
.auto_in_b_valid (auto_axi4index_in_b_valid_0),
.auto_in_b_bits_id (auto_axi4index_in_b_bits_id_0),
.auto_in_b_bits_resp (auto_axi4index_in_b_bits_resp_0),
.auto_in_ar_ready (auto_axi4index_in_ar_ready_0),
.auto_in_ar_valid (auto_axi4index_in_ar_valid_0), // @[LazyModuleImp.scala:138:7]
.auto_in_ar_bits_id (auto_axi4index_in_ar_bits_id_0), // @[LazyModuleImp.scala:138:7]
.auto_in_ar_bits_addr (auto_axi4index_in_ar_bits_addr_0), // @[LazyModuleImp.scala:138:7]
.auto_in_ar_bits_len (auto_axi4index_in_ar_bits_len_0), // @[LazyModuleImp.scala:138:7]
.auto_in_ar_bits_size (auto_axi4index_in_ar_bits_size_0), // @[LazyModuleImp.scala:138:7]
.auto_in_ar_bits_burst (auto_axi4index_in_ar_bits_burst_0), // @[LazyModuleImp.scala:138:7]
.auto_in_ar_bits_lock (auto_axi4index_in_ar_bits_lock_0), // @[LazyModuleImp.scala:138:7]
.auto_in_ar_bits_cache (auto_axi4index_in_ar_bits_cache_0), // @[LazyModuleImp.scala:138:7]
.auto_in_ar_bits_prot (auto_axi4index_in_ar_bits_prot_0), // @[LazyModuleImp.scala:138:7]
.auto_in_ar_bits_qos (auto_axi4index_in_ar_bits_qos_0), // @[LazyModuleImp.scala:138:7]
.auto_in_r_ready (auto_axi4index_in_r_ready_0), // @[LazyModuleImp.scala:138:7]
.auto_in_r_valid (auto_axi4index_in_r_valid_0),
.auto_in_r_bits_id (auto_axi4index_in_r_bits_id_0),
.auto_in_r_bits_data (auto_axi4index_in_r_bits_data_0),
.auto_in_r_bits_resp (auto_axi4index_in_r_bits_resp_0),
.auto_in_r_bits_last (auto_axi4index_in_r_bits_last_0),
.auto_out_aw_ready (_axi4frag_auto_in_aw_ready), // @[Fragmenter.scala:224:30]
.auto_out_aw_valid (_axi4index_auto_out_aw_valid),
.auto_out_aw_bits_id (_axi4index_auto_out_aw_bits_id),
.auto_out_aw_bits_addr (_axi4index_auto_out_aw_bits_addr),
.auto_out_aw_bits_len (_axi4index_auto_out_aw_bits_len),
.auto_out_aw_bits_size (_axi4index_auto_out_aw_bits_size),
.auto_out_aw_bits_burst (_axi4index_auto_out_aw_bits_burst),
.auto_out_aw_bits_lock (_axi4index_auto_out_aw_bits_lock),
.auto_out_aw_bits_cache (_axi4index_auto_out_aw_bits_cache),
.auto_out_aw_bits_prot (_axi4index_auto_out_aw_bits_prot),
.auto_out_aw_bits_qos (_axi4index_auto_out_aw_bits_qos),
.auto_out_aw_bits_echo_extra_id (_axi4index_auto_out_aw_bits_echo_extra_id),
.auto_out_w_ready (_axi4frag_auto_in_w_ready), // @[Fragmenter.scala:224:30]
.auto_out_w_valid (_axi4index_auto_out_w_valid),
.auto_out_w_bits_data (_axi4index_auto_out_w_bits_data),
.auto_out_w_bits_strb (_axi4index_auto_out_w_bits_strb),
.auto_out_w_bits_last (_axi4index_auto_out_w_bits_last),
.auto_out_b_ready (_axi4index_auto_out_b_ready),
.auto_out_b_valid (_axi4frag_auto_in_b_valid), // @[Fragmenter.scala:224:30]
.auto_out_b_bits_id (_axi4frag_auto_in_b_bits_id), // @[Fragmenter.scala:224:30]
.auto_out_b_bits_resp (_axi4frag_auto_in_b_bits_resp), // @[Fragmenter.scala:224:30]
.auto_out_b_bits_echo_extra_id (_axi4frag_auto_in_b_bits_echo_extra_id), // @[Fragmenter.scala:224:30]
.auto_out_ar_ready (_axi4frag_auto_in_ar_ready), // @[Fragmenter.scala:224:30]
.auto_out_ar_valid (_axi4index_auto_out_ar_valid),
.auto_out_ar_bits_id (_axi4index_auto_out_ar_bits_id),
.auto_out_ar_bits_addr (_axi4index_auto_out_ar_bits_addr),
.auto_out_ar_bits_len (_axi4index_auto_out_ar_bits_len),
.auto_out_ar_bits_size (_axi4index_auto_out_ar_bits_size),
.auto_out_ar_bits_burst (_axi4index_auto_out_ar_bits_burst),
.auto_out_ar_bits_lock (_axi4index_auto_out_ar_bits_lock),
.auto_out_ar_bits_cache (_axi4index_auto_out_ar_bits_cache),
.auto_out_ar_bits_prot (_axi4index_auto_out_ar_bits_prot),
.auto_out_ar_bits_qos (_axi4index_auto_out_ar_bits_qos),
.auto_out_ar_bits_echo_extra_id (_axi4index_auto_out_ar_bits_echo_extra_id),
.auto_out_r_ready (_axi4index_auto_out_r_ready),
.auto_out_r_valid (_axi4frag_auto_in_r_valid), // @[Fragmenter.scala:224:30]
.auto_out_r_bits_id (_axi4frag_auto_in_r_bits_id), // @[Fragmenter.scala:224:30]
.auto_out_r_bits_data (_axi4frag_auto_in_r_bits_data), // @[Fragmenter.scala:224:30]
.auto_out_r_bits_resp (_axi4frag_auto_in_r_bits_resp), // @[Fragmenter.scala:224:30]
.auto_out_r_bits_echo_extra_id (_axi4frag_auto_in_r_bits_echo_extra_id), // @[Fragmenter.scala:224:30]
.auto_out_r_bits_last (_axi4frag_auto_in_r_bits_last) // @[Fragmenter.scala:224:30]
); // @[IdIndexer.scala:108:31]
assign auto_axi4index_in_aw_ready = auto_axi4index_in_aw_ready_0; // @[LazyModuleImp.scala:138:7]
assign auto_axi4index_in_w_ready = auto_axi4index_in_w_ready_0; // @[LazyModuleImp.scala:138:7]
assign auto_axi4index_in_b_valid = auto_axi4index_in_b_valid_0; // @[LazyModuleImp.scala:138:7]
assign auto_axi4index_in_b_bits_id = auto_axi4index_in_b_bits_id_0; // @[LazyModuleImp.scala:138:7]
assign auto_axi4index_in_b_bits_resp = auto_axi4index_in_b_bits_resp_0; // @[LazyModuleImp.scala:138:7]
assign auto_axi4index_in_ar_ready = auto_axi4index_in_ar_ready_0; // @[LazyModuleImp.scala:138:7]
assign auto_axi4index_in_r_valid = auto_axi4index_in_r_valid_0; // @[LazyModuleImp.scala:138:7]
assign auto_axi4index_in_r_bits_id = auto_axi4index_in_r_bits_id_0; // @[LazyModuleImp.scala:138:7]
assign auto_axi4index_in_r_bits_data = auto_axi4index_in_r_bits_data_0; // @[LazyModuleImp.scala:138:7]
assign auto_axi4index_in_r_bits_resp = auto_axi4index_in_r_bits_resp_0; // @[LazyModuleImp.scala:138:7]
assign auto_axi4index_in_r_bits_last = auto_axi4index_in_r_bits_last_0; // @[LazyModuleImp.scala:138:7]
assign auto_tl_out_a_valid = auto_tl_out_a_valid_0; // @[LazyModuleImp.scala:138:7]
assign auto_tl_out_a_bits_opcode = auto_tl_out_a_bits_opcode_0; // @[LazyModuleImp.scala:138:7]
assign auto_tl_out_a_bits_param = auto_tl_out_a_bits_param_0; // @[LazyModuleImp.scala:138:7]
assign auto_tl_out_a_bits_size = auto_tl_out_a_bits_size_0; // @[LazyModuleImp.scala:138:7]
assign auto_tl_out_a_bits_source = auto_tl_out_a_bits_source_0; // @[LazyModuleImp.scala:138:7]
assign auto_tl_out_a_bits_address = auto_tl_out_a_bits_address_0; // @[LazyModuleImp.scala:138:7]
assign auto_tl_out_a_bits_user_amba_prot_bufferable = auto_tl_out_a_bits_user_amba_prot_bufferable_0; // @[LazyModuleImp.scala:138:7]
assign auto_tl_out_a_bits_user_amba_prot_modifiable = auto_tl_out_a_bits_user_amba_prot_modifiable_0; // @[LazyModuleImp.scala:138:7]
assign auto_tl_out_a_bits_user_amba_prot_readalloc = auto_tl_out_a_bits_user_amba_prot_readalloc_0; // @[LazyModuleImp.scala:138:7]
assign auto_tl_out_a_bits_user_amba_prot_writealloc = auto_tl_out_a_bits_user_amba_prot_writealloc_0; // @[LazyModuleImp.scala:138:7]
assign auto_tl_out_a_bits_user_amba_prot_privileged = auto_tl_out_a_bits_user_amba_prot_privileged_0; // @[LazyModuleImp.scala:138:7]
assign auto_tl_out_a_bits_user_amba_prot_secure = auto_tl_out_a_bits_user_amba_prot_secure_0; // @[LazyModuleImp.scala:138:7]
assign auto_tl_out_a_bits_user_amba_prot_fetch = auto_tl_out_a_bits_user_amba_prot_fetch_0; // @[LazyModuleImp.scala:138:7]
assign auto_tl_out_a_bits_mask = auto_tl_out_a_bits_mask_0; // @[LazyModuleImp.scala:138:7]
assign auto_tl_out_a_bits_data = auto_tl_out_a_bits_data_0; // @[LazyModuleImp.scala:138:7]
assign auto_tl_out_a_bits_corrupt = auto_tl_out_a_bits_corrupt_0; // @[LazyModuleImp.scala:138:7]
assign auto_tl_out_d_ready = auto_tl_out_d_ready_0; // @[LazyModuleImp.scala:138:7]
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File UnsafeAXI4ToTL.scala:
package ara
import chisel3._
import chisel3.util._
import freechips.rocketchip.amba._
import freechips.rocketchip.amba.axi4._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
class ReorderData(val dataWidth: Int, val respWidth: Int, val userFields: Seq[BundleFieldBase]) extends Bundle {
val data = UInt(dataWidth.W)
val resp = UInt(respWidth.W)
val last = Bool()
val user = BundleMap(userFields)
}
/** Parameters for [[BaseReservableListBuffer]] and all child classes.
*
* @param numEntries Total number of elements that can be stored in the 'data' RAM
* @param numLists Maximum number of linked lists
* @param numBeats Maximum number of beats per entry
*/
case class ReservableListBufferParameters(numEntries: Int, numLists: Int, numBeats: Int) {
// Avoid zero-width wires when we call 'log2Ceil'
val entryBits = if (numEntries == 1) 1 else log2Ceil(numEntries)
val listBits = if (numLists == 1) 1 else log2Ceil(numLists)
val beatBits = if (numBeats == 1) 1 else log2Ceil(numBeats)
}
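// Illustrative note (not part of the original source): with numEntries = 8, numLists = 16 and
// numBeats = 4, the derived widths are entryBits = 3, listBits = 4 and beatBits = 2. The
// 'if (... == 1) 1' guards only matter in the degenerate single-entry/list/beat configurations,
// where log2Ceil(1) = 0 would otherwise produce zero-width index wires.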
case class UnsafeAXI4ToTLNode(numTlTxns: Int, wcorrupt: Boolean)(implicit valName: ValName)
extends MixedAdapterNode(AXI4Imp, TLImp)(
dFn = { case mp =>
TLMasterPortParameters.v2(
masters = mp.masters.zipWithIndex.map { case (m, i) =>
// Support 'numTlTxns' read requests and 'numTlTxns' write requests at once.
val numSourceIds = numTlTxns * 2
TLMasterParameters.v2(
name = m.name,
sourceId = IdRange(i * numSourceIds, (i + 1) * numSourceIds),
nodePath = m.nodePath
)
},
echoFields = mp.echoFields,
requestFields = AMBAProtField() +: mp.requestFields,
responseKeys = mp.responseKeys
)
},
uFn = { mp =>
AXI4SlavePortParameters(
slaves = mp.managers.map { m =>
val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits))
AXI4SlaveParameters(
address = m.address,
resources = m.resources,
regionType = m.regionType,
executable = m.executable,
nodePath = m.nodePath,
supportsWrite = m.supportsPutPartial.intersect(maxXfer),
supportsRead = m.supportsGet.intersect(maxXfer),
interleavedId = Some(0) // TL2 never interleaves D beats
)
},
beatBytes = mp.beatBytes,
minLatency = mp.minLatency,
responseFields = mp.responseFields,
requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt)
)
}
)
class UnsafeAXI4ToTL(numTlTxns: Int, wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule {
require(numTlTxns >= 1)
require(isPow2(numTlTxns), s"Number of TileLink transactions ($numTlTxns) must be a power of 2")
val node = UnsafeAXI4ToTLNode(numTlTxns, wcorrupt)
lazy val module = new LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
edgeIn.master.masters.foreach { m =>
require(m.aligned, "AXI4ToTL requires aligned requests")
}
val numIds = edgeIn.master.endId
val beatBytes = edgeOut.slave.beatBytes
val maxTransfer = edgeOut.slave.maxTransfer
val maxBeats = maxTransfer / beatBytes
// Look for an Error device to redirect bad requests
val errorDevs = edgeOut.slave.managers.filter(_.nodePath.last.lazyModule.className == "TLError")
require(!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. One must be instantiated.")
val errorDev = errorDevs.maxBy(_.maxTransfer)
val errorDevAddr = errorDev.address.head.base
require(
errorDev.supportsPutPartial.contains(maxTransfer),
s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support $maxTransfer"
)
require(
errorDev.supportsGet.contains(maxTransfer),
s"Error device supports ${errorDev.supportsGet} Get but must support $maxTransfer"
)
// All of the read-response reordering logic.
val listBufData = new ReorderData(beatBytes * 8, edgeIn.bundle.respBits, out.d.bits.user.fields)
val listBufParams = ReservableListBufferParameters(numTlTxns, numIds, maxBeats)
val listBuffer = if (numTlTxns > 1) {
Module(new ReservableListBuffer(listBufData, listBufParams))
} else {
Module(new PassthroughListBuffer(listBufData, listBufParams))
}
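// With a single outstanding TileLink transaction there is nothing to reorder, so a trivial
// passthrough buffer suffices; the full reservable list buffer is only needed when multiple
// reads can be in flight and their D-channel beats may return out of order.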
// To differentiate between read and write transaction IDs, we will set the MSB of the TileLink 'source' field to
// 0 for read requests and 1 for write requests.
val isReadSourceBit = 0.U(1.W)
val isWriteSourceBit = 1.U(1.W)
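// Illustrative mapping (assuming numTlTxns = 4): read requests use TileLink source IDs
// Cat(0, 0..3) = 0..3 and write requests use Cat(1, 0..3) = 4..7, matching the
// 'numSourceIds = numTlTxns * 2' range advertised in the node's TLMasterParameters above.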
/* Read request logic */
val rOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val rBytes1 = in.ar.bits.bytes1()
val rSize = OH1ToUInt(rBytes1)
val rOk = edgeOut.slave.supportsGetSafe(in.ar.bits.addr, rSize)
val rId = if (numTlTxns > 1) {
Cat(isReadSourceBit, listBuffer.ioReservedIndex)
} else {
isReadSourceBit
}
val rAddr = Mux(rOk, in.ar.bits.addr, errorDevAddr.U | in.ar.bits.addr(log2Ceil(beatBytes) - 1, 0))
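// Reads the TileLink slave cannot legally service are redirected to the error device; only the
// sub-beat offset of the original address is preserved, so the redirected access stays aligned.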
// Indicates if there are still valid TileLink source IDs left to use.
val canIssueR = listBuffer.ioReserve.ready
listBuffer.ioReserve.bits := in.ar.bits.id
listBuffer.ioReserve.valid := in.ar.valid && rOut.ready
in.ar.ready := rOut.ready && canIssueR
rOut.valid := in.ar.valid && canIssueR
rOut.bits :<= edgeOut.Get(rId, rAddr, rSize)._2
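// edgeOut.Get(...) returns a (legal, bits) pair; only the A-channel bits (._2) are used here,
// since unsupported reads were already redirected to the error device via 'rAddr' above.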
rOut.bits.user :<= in.ar.bits.user
rOut.bits.user.lift(AMBAProt).foreach { rProt =>
rProt.privileged := in.ar.bits.prot(0)
rProt.secure := !in.ar.bits.prot(1)
rProt.fetch := in.ar.bits.prot(2)
rProt.bufferable := in.ar.bits.cache(0)
rProt.modifiable := in.ar.bits.cache(1)
rProt.readalloc := in.ar.bits.cache(2)
rProt.writealloc := in.ar.bits.cache(3)
}
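// Note on the AXI4 encodings used above: AxPROT(0) = privileged, AxPROT(1) = non-secure (hence
// the inversion), AxPROT(2) = instruction fetch; AxCACHE(0) = bufferable, AxCACHE(1) = modifiable,
// AxCACHE(2)/(3) = read-/write-allocate.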
/* Write request logic */
// Strip off the MSB, which identifies the transaction as read vs write.
val strippedResponseSourceId = if (numTlTxns > 1) {
out.d.bits.source((out.d.bits.source).getWidth - 2, 0)
} else {
// When there's only 1 TileLink transaction allowed for read/write, then this field is always 0.
0.U(1.W)
}
// Track when a write request burst is in progress.
val writeBurstBusy = RegInit(false.B)
when(in.w.fire) {
writeBurstBusy := !in.w.bits.last
}
val usedWriteIds = RegInit(0.U(numTlTxns.W))
val canIssueW = !usedWriteIds.andR
val usedWriteIdsSet = WireDefault(0.U(numTlTxns.W))
val usedWriteIdsClr = WireDefault(0.U(numTlTxns.W))
usedWriteIds := (usedWriteIds & ~usedWriteIdsClr) | usedWriteIdsSet
// Since write responses can show up in the middle of a write burst, we need to ensure the write burst ID doesn't
// change mid-burst.
val freeWriteIdOHRaw = Wire(UInt(numTlTxns.W))
val freeWriteIdOH = freeWriteIdOHRaw holdUnless !writeBurstBusy
val freeWriteIdIndex = OHToUInt(freeWriteIdOH)
freeWriteIdOHRaw := ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds
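// Lowest-cleared-bit selection, for illustration: with usedWriteIds = b0111, ~usedWriteIds = b1000
// and leftOR(b1000) = b1000, so ~(leftOR << 1) & ~usedWriteIds = b1000 and entry 3 is picked.
// The 'holdUnless' above then freezes that choice for the duration of a W-channel burst.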
val wOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val wBytes1 = in.aw.bits.bytes1()
val wSize = OH1ToUInt(wBytes1)
val wOk = edgeOut.slave.supportsPutPartialSafe(in.aw.bits.addr, wSize)
val wId = if (numTlTxns > 1) {
Cat(isWriteSourceBit, freeWriteIdIndex)
} else {
isWriteSourceBit
}
val wAddr = Mux(wOk, in.aw.bits.addr, errorDevAddr.U | in.aw.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Here, we're taking advantage of the Irrevocable behavior of AXI4 (once 'valid' is asserted it must remain
// asserted until the handshake occurs). We will only accept W-channel beats when we have a valid AW beat, but
// the AW-channel beat won't fire until the final W-channel beat fires. So, we have stable address/size/strb
// bits during a W-channel burst.
in.aw.ready := wOut.ready && in.w.valid && in.w.bits.last && canIssueW
in.w.ready := wOut.ready && in.aw.valid && canIssueW
wOut.valid := in.aw.valid && in.w.valid && canIssueW
wOut.bits :<= edgeOut.Put(wId, wAddr, wSize, in.w.bits.data, in.w.bits.strb)._2
in.w.bits.user.lift(AMBACorrupt).foreach { wOut.bits.corrupt := _ }
wOut.bits.user :<= in.aw.bits.user
wOut.bits.user.lift(AMBAProt).foreach { wProt =>
wProt.privileged := in.aw.bits.prot(0)
wProt.secure := !in.aw.bits.prot(1)
wProt.fetch := in.aw.bits.prot(2)
wProt.bufferable := in.aw.bits.cache(0)
wProt.modifiable := in.aw.bits.cache(1)
wProt.readalloc := in.aw.bits.cache(2)
wProt.writealloc := in.aw.bits.cache(3)
}
// Merge the AXI4 read/write requests into the TL-A channel.
TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, rOut), (in.aw.bits.len, wOut))
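// The UInt in each (beats, source) pair tells the arbiter how many additional A-channel beats
// follow the first: 0 for a single-beat Get, and 'len' further data beats for a Put burst, so the
// arbiter holds its grant and does not interleave another request into the burst.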
/* Read/write response logic */
val okB = Wire(Irrevocable(new AXI4BundleB(edgeIn.bundle)))
val okR = Wire(Irrevocable(new AXI4BundleR(edgeIn.bundle)))
val dResp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY)
val dHasData = edgeOut.hasData(out.d.bits)
val (_dFirst, dLast, _dDone, dCount) = edgeOut.count(out.d)
val dNumBeats1 = edgeOut.numBeats1(out.d.bits)
// Handle cases where writeack arrives before write is done
val writeEarlyAck = (UIntToOH(strippedResponseSourceId) & usedWriteIds) === 0.U
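// 'writeEarlyAck' is set when a D-channel write acknowledgement arrives for a source ID that has
// not yet been marked used, i.e. the slave responded before the final W beat fired; the ack is
// then held back (see out.d.ready / okB.valid below) until the burst completes.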
out.d.ready := Mux(dHasData, listBuffer.ioResponse.ready, okB.ready && !writeEarlyAck)
listBuffer.ioDataOut.ready := okR.ready
okR.valid := listBuffer.ioDataOut.valid
okB.valid := out.d.valid && !dHasData && !writeEarlyAck
listBuffer.ioResponse.valid := out.d.valid && dHasData
listBuffer.ioResponse.bits.index := strippedResponseSourceId
listBuffer.ioResponse.bits.data.data := out.d.bits.data
listBuffer.ioResponse.bits.data.resp := dResp
listBuffer.ioResponse.bits.data.last := dLast
listBuffer.ioResponse.bits.data.user :<= out.d.bits.user
listBuffer.ioResponse.bits.count := dCount
listBuffer.ioResponse.bits.numBeats1 := dNumBeats1
okR.bits.id := listBuffer.ioDataOut.bits.listIndex
okR.bits.data := listBuffer.ioDataOut.bits.payload.data
okR.bits.resp := listBuffer.ioDataOut.bits.payload.resp
okR.bits.last := listBuffer.ioDataOut.bits.payload.last
okR.bits.user :<= listBuffer.ioDataOut.bits.payload.user
// Upon the final beat in a write request, record a mapping from TileLink source ID to AXI write ID. Upon a write
// response, mark the write transaction as complete.
val writeIdMap = Mem(numTlTxns, UInt(log2Ceil(numIds).W))
val writeResponseId = writeIdMap.read(strippedResponseSourceId)
when(wOut.fire) {
writeIdMap.write(freeWriteIdIndex, in.aw.bits.id)
}
when(edgeOut.done(wOut)) {
usedWriteIdsSet := freeWriteIdOH
}
when(okB.fire) {
usedWriteIdsClr := UIntToOH(strippedResponseSourceId, numTlTxns)
}
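// Write-ID lifecycle: an ID is claimed (usedWriteIdsSet) once the last A-channel beat of the Put
// has fired, and released (usedWriteIdsClr) when the corresponding B response is handed back to
// the AXI4 master.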
okB.bits.id := writeResponseId
okB.bits.resp := dResp
okB.bits.user :<= out.d.bits.user
// AXI4 needs irrevocable behaviour
in.r <> Queue.irrevocable(okR, 1, flow = true)
in.b <> Queue.irrevocable(okB, 1, flow = true)
// Unused channels
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
/* Alignment constraints. The AXI4Fragmenter should guarantee all of these constraints. */
def checkRequest[T <: AXI4BundleA](a: IrrevocableIO[T], reqType: String): Unit = {
val lReqType = reqType.toLowerCase
when(a.valid) {
assert(a.bits.len < maxBeats.U, s"$reqType burst length (%d) must be less than $maxBeats", a.bits.len + 1.U)
// Narrow transfers and FIXED bursts must be single-beat bursts.
when(a.bits.len =/= 0.U) {
assert(
a.bits.size === log2Ceil(beatBytes).U,
s"Narrow $lReqType transfers (%d < $beatBytes bytes) can't be multi-beat bursts (%d beats)",
1.U << a.bits.size,
a.bits.len + 1.U
)
assert(
a.bits.burst =/= AXI4Parameters.BURST_FIXED,
s"Fixed $lReqType bursts can't be multi-beat bursts (%d beats)",
a.bits.len + 1.U
)
}
// Furthermore, the transfer size (a.bits.bytes1() + 1.U) must be naturally-aligned to the address (in
// particular, during both WRAP and INCR bursts), but this constraint is already checked by TileLink
// Monitors. Note that this alignment requirement means that WRAP bursts are identical to INCR bursts.
}
}
checkRequest(in.ar, "Read")
checkRequest(in.aw, "Write")
}
}
}
object UnsafeAXI4ToTL {
def apply(numTlTxns: Int = 1, wcorrupt: Boolean = true)(implicit p: Parameters) = {
val axi42tl = LazyModule(new UnsafeAXI4ToTL(numTlTxns, wcorrupt))
axi42tl.node
}
}
/* ReservableListBuffer logic, and associated classes. */
class ResponsePayload[T <: Data](val data: T, val params: ReservableListBufferParameters) extends Bundle {
val index = UInt(params.entryBits.W)
val count = UInt(params.beatBits.W)
val numBeats1 = UInt(params.beatBits.W)
}
class DataOutPayload[T <: Data](val payload: T, val params: ReservableListBufferParameters) extends Bundle {
val listIndex = UInt(params.listBits.W)
}
/** Abstract base class to unify [[ReservableListBuffer]] and [[PassthroughListBuffer]]. */
abstract class BaseReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends Module {
require(params.numEntries > 0)
require(params.numLists > 0)
val ioReserve = IO(Flipped(Decoupled(UInt(params.listBits.W))))
val ioReservedIndex = IO(Output(UInt(params.entryBits.W)))
val ioResponse = IO(Flipped(Decoupled(new ResponsePayload(gen, params))))
val ioDataOut = IO(Decoupled(new DataOutPayload(gen, params)))
}
/** A modified version of 'ListBuffer' from 'sifive/block-inclusivecache-sifive'. This module forces users to reserve
* linked list entries (through the 'ioReserve' port) before writing data into those linked lists (through the
* 'ioResponse' port). Each response is tagged to indicate which linked list it is written into. The responses for a
* given linked list can come back out-of-order, but they will be read out through the 'ioDataOut' port in-order.
*
* ==Constructor==
* @param gen Chisel type of linked list data element
* @param params Other parameters
*
* ==Module IO==
* @param ioReserve Index of list to reserve a new element in
* @param ioReservedIndex Index of the entry that was reserved in the linked list, valid when 'ioReserve.fire'
* @param ioResponse Payload containing response data and linked-list-entry index
* @param ioDataOut Payload containing data read from response linked list and linked list index
*/
class ReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
val valid = RegInit(0.U(params.numLists.W))
val head = Mem(params.numLists, UInt(params.entryBits.W))
val tail = Mem(params.numLists, UInt(params.entryBits.W))
val used = RegInit(0.U(params.numEntries.W))
val next = Mem(params.numEntries, UInt(params.entryBits.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val dataMems = Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) }
val dataIsPresent = RegInit(0.U(params.numEntries.W))
val beats = Mem(params.numEntries, UInt(params.beatBits.W))
// The 'data' SRAM should be single-ported (read-or-write), since dual-ported SRAMs are significantly slower.
val dataMemReadEnable = WireDefault(false.B)
val dataMemWriteEnable = WireDefault(false.B)
assert(!(dataMemReadEnable && dataMemWriteEnable))
// 'freeOH' has a single bit set, which is the least-significant bit that is cleared in 'used'. So, it's the
// lowest-index entry in the 'data' RAM which is free.
val freeOH = Wire(UInt(params.numEntries.W))
val freeIndex = OHToUInt(freeOH)
freeOH := ~(leftOR(~used) << 1) & ~used
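// Worked example (illustrative, assuming 4 entries): if used = b0011 then ~used = b1100,
// leftOR(~used) = b1100, shifting left gives b11000, inverting and ANDing with ~used leaves
// b0100, so freeOH selects entry 2, the lowest-index free entry, and freeIndex = 2.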
ioReservedIndex := freeIndex
val validSet = WireDefault(0.U(params.numLists.W))
val validClr = WireDefault(0.U(params.numLists.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
val dataIsPresentSet = WireDefault(0.U(params.numEntries.W))
val dataIsPresentClr = WireDefault(0.U(params.numEntries.W))
valid := (valid & ~validClr) | validSet
used := (used & ~usedClr) | usedSet
dataIsPresent := (dataIsPresent & ~dataIsPresentClr) | dataIsPresentSet
/* Reservation logic signals */
val reserveTail = Wire(UInt(params.entryBits.W))
val reserveIsValid = Wire(Bool())
/* Response logic signals */
val responseIndex = Wire(UInt(params.entryBits.W))
val responseListIndex = Wire(UInt(params.listBits.W))
val responseHead = Wire(UInt(params.entryBits.W))
val responseTail = Wire(UInt(params.entryBits.W))
val nextResponseHead = Wire(UInt(params.entryBits.W))
val nextDataIsPresent = Wire(Bool())
val isResponseInOrder = Wire(Bool())
val isEndOfList = Wire(Bool())
val isLastBeat = Wire(Bool())
val isLastResponseBeat = Wire(Bool())
val isLastUnwindBeat = Wire(Bool())
/* Reservation logic */
reserveTail := tail.read(ioReserve.bits)
reserveIsValid := valid(ioReserve.bits)
ioReserve.ready := !used.andR
// When we want to append-to and destroy the same linked list on the same cycle, we need to take special care that we
// actually start a new list, rather than appending to a list that's about to disappear.
val reserveResponseSameList = ioReserve.bits === responseListIndex
val appendToAndDestroyList =
ioReserve.fire && ioDataOut.fire && reserveResponseSameList && isEndOfList && isLastBeat
when(ioReserve.fire) {
validSet := UIntToOH(ioReserve.bits, params.numLists)
usedSet := freeOH
when(reserveIsValid && !appendToAndDestroyList) {
next.write(reserveTail, freeIndex)
}.otherwise {
head.write(ioReserve.bits, freeIndex)
}
tail.write(ioReserve.bits, freeIndex)
map.write(freeIndex, ioReserve.bits)
}
/* Response logic */
// The majority of the response logic (reading from and writing to the various RAMs) is common between the
// response-from-IO case (ioResponse.fire) and the response-from-unwind case (unwindDataIsValid).
// The read from the 'next' RAM should be performed at the address given by 'responseHead'. However, we only use the
// 'nextResponseHead' signal when 'isResponseInOrder' is asserted (both in the response-from-IO and
// response-from-unwind cases), which implies that 'responseHead' equals 'responseIndex'. 'responseHead' comes after
// two back-to-back RAM reads, so indexing into the 'next' RAM with 'responseIndex' is much quicker.
responseHead := head.read(responseListIndex)
responseTail := tail.read(responseListIndex)
nextResponseHead := next.read(responseIndex)
nextDataIsPresent := dataIsPresent(nextResponseHead)
// Note that when 'isEndOfList' is asserted, 'nextResponseHead' (and therefore 'nextDataIsPresent') is invalid, since
// there isn't a next element in the linked list.
isResponseInOrder := responseHead === responseIndex
isEndOfList := responseHead === responseTail
isLastResponseBeat := ioResponse.bits.count === ioResponse.bits.numBeats1
// When a response's last beat is sent to the output channel, mark it as completed. This can happen in two
// situations:
// 1. We receive an in-order response, which travels straight from 'ioResponse' to 'ioDataOut'. The 'data' SRAM
// reservation was never needed.
// 2. An entry is read out of the 'data' SRAM (within the unwind FSM).
when(ioDataOut.fire && isLastBeat) {
// Mark the reservation as no-longer-used.
usedClr := UIntToOH(responseIndex, params.numEntries)
// If the response is in-order, then we're popping an element from this linked list.
when(isEndOfList) {
// Once we pop the last element from a linked list, mark it as no-longer-present.
validClr := UIntToOH(responseListIndex, params.numLists)
}.otherwise {
// Move the linked list's head pointer to the new head pointer.
head.write(responseListIndex, nextResponseHead)
}
}
// If we get an out-of-order response, then stash it in the 'data' SRAM for later unwinding.
when(ioResponse.fire && !isResponseInOrder) {
dataMemWriteEnable := true.B
when(isLastResponseBeat) {
dataIsPresentSet := UIntToOH(ioResponse.bits.index, params.numEntries)
beats.write(ioResponse.bits.index, ioResponse.bits.numBeats1)
}
}
// Use the 'ioResponse.bits.count' index (AKA the beat number) to select which 'data' SRAM to write to.
val responseCountOH = UIntToOH(ioResponse.bits.count, params.numBeats)
(responseCountOH.asBools zip dataMems) foreach { case (select, seqMem) =>
when(select && dataMemWriteEnable) {
seqMem.write(ioResponse.bits.index, ioResponse.bits.data)
}
}
/* Response unwind logic */
// Unwind FSM state definitions
val sIdle :: sUnwinding :: Nil = Enum(2)
val unwindState = RegInit(sIdle)
val busyUnwinding = unwindState === sUnwinding
val startUnwind = Wire(Bool())
val stopUnwind = Wire(Bool())
when(startUnwind) {
unwindState := sUnwinding
}.elsewhen(stopUnwind) {
unwindState := sIdle
}
assert(!(startUnwind && stopUnwind))
// Start the unwind FSM when there is an old out-of-order response stored in the 'data' SRAM that is now about to
// become the next in-order response. As noted previously, when 'isEndOfList' is asserted, 'nextDataIsPresent' is
// invalid.
//
// Note that since an in-order response from 'ioResponse' to 'ioDataOut' starts the unwind FSM, we don't have to
// worry about overwriting the 'data' SRAM's output when we start the unwind FSM.
startUnwind := ioResponse.fire && isResponseInOrder && isLastResponseBeat && !isEndOfList && nextDataIsPresent
// Stop the unwind FSM when the output channel consumes the final beat of an element from the unwind FSM, and one of
// two things happens:
// 1. We're still waiting for the next in-order response for this list (!nextDataIsPresent)
// 2. There are no more outstanding responses in this list (isEndOfList)
//
// Including 'busyUnwinding' ensures this is a single-cycle pulse, and it never fires while in-order transactions are
// passing from 'ioResponse' to 'ioDataOut'.
stopUnwind := busyUnwinding && ioDataOut.fire && isLastUnwindBeat && (!nextDataIsPresent || isEndOfList)
val isUnwindBurstOver = Wire(Bool())
val startNewBurst = startUnwind || (isUnwindBurstOver && dataMemReadEnable)
// Track the number of beats left to unwind for each list entry. At the start of a new burst, we flop the number of
// beats in this burst (minus 1) into 'unwindBeats1', and we reset the 'beatCounter' counter. With each beat, we
// increment 'beatCounter' until it reaches 'unwindBeats1'.
val unwindBeats1 = Reg(UInt(params.beatBits.W))
val nextBeatCounter = Wire(UInt(params.beatBits.W))
val beatCounter = RegNext(nextBeatCounter)
isUnwindBurstOver := beatCounter === unwindBeats1
when(startNewBurst) {
unwindBeats1 := beats.read(nextResponseHead)
nextBeatCounter := 0.U
}.elsewhen(dataMemReadEnable) {
nextBeatCounter := beatCounter + 1.U
}.otherwise {
nextBeatCounter := beatCounter
}
// When unwinding, feed the next linked-list head pointer (read out of the 'next' RAM) back so we can unwind the next
// entry in this linked list. Only update the pointer when we're actually moving to the next 'data' SRAM entry (which
// happens at the start of reading a new stored burst).
val unwindResponseIndex = RegEnable(nextResponseHead, startNewBurst)
responseIndex := Mux(busyUnwinding, unwindResponseIndex, ioResponse.bits.index)
// Hold 'nextResponseHead' static while we're in the middle of unwinding a multi-beat burst entry. We don't want the
// SRAM read address to shift while reading beats from a burst. Note that this is identical to 'nextResponseHead
// holdUnless startNewBurst', but 'unwindResponseIndex' already implements the 'RegEnable' signal in 'holdUnless'.
val unwindReadAddress = Mux(startNewBurst, nextResponseHead, unwindResponseIndex)
// The 'data' SRAM's output is valid if we read from the SRAM on the previous cycle. The SRAM's output stays valid
// until it is consumed by the output channel (and if we don't read from the SRAM again on that same cycle).
val unwindDataIsValid = RegInit(false.B)
when(dataMemReadEnable) {
unwindDataIsValid := true.B
}.elsewhen(ioDataOut.fire) {
unwindDataIsValid := false.B
}
isLastUnwindBeat := isUnwindBurstOver && unwindDataIsValid
// Indicates if this is the last beat for both 'ioResponse'-to-'ioDataOut' and unwind-to-'ioDataOut' beats.
isLastBeat := Mux(busyUnwinding, isLastUnwindBeat, isLastResponseBeat)
// Select which SRAM to read from based on the beat counter.
val dataOutputVec = Wire(Vec(params.numBeats, gen))
val nextBeatCounterOH = UIntToOH(nextBeatCounter, params.numBeats)
(nextBeatCounterOH.asBools zip dataMems).zipWithIndex foreach { case ((select, seqMem), i) =>
dataOutputVec(i) := seqMem.read(unwindReadAddress, select && dataMemReadEnable)
}
// Select the current 'data' SRAM output beat, and save the output in a register in case we're being back-pressured
// by 'ioDataOut'. This implements the functionality of 'readAndHold', but only on the single SRAM we're reading
// from.
val dataOutput = dataOutputVec(beatCounter) holdUnless RegNext(dataMemReadEnable)
// Mark 'data' burst entries as no-longer-present as they get read out of the SRAM.
when(dataMemReadEnable) {
dataIsPresentClr := UIntToOH(unwindReadAddress, params.numEntries)
}
// As noted above, when starting the unwind FSM, we know the 'data' SRAM's output isn't valid, so it's safe to issue
// a read command. Otherwise, only issue an SRAM read when the next 'unwindState' is 'sUnwinding', and if we know
// we're not going to overwrite the SRAM's current output (the SRAM output is already valid, and it's not going to be
// consumed by the output channel).
val dontReadFromDataMem = unwindDataIsValid && !ioDataOut.ready
dataMemReadEnable := startUnwind || (busyUnwinding && !stopUnwind && !dontReadFromDataMem)
// While unwinding, prevent new reservations from overwriting the current 'map' entry that we're using. We need
// 'responseListIndex' to be coherent for the entire unwind process.
val rawResponseListIndex = map.read(responseIndex)
val unwindResponseListIndex = RegEnable(rawResponseListIndex, startNewBurst)
responseListIndex := Mux(busyUnwinding, unwindResponseListIndex, rawResponseListIndex)
// Accept responses either when they can be passed through to the output channel, or if they're out-of-order and are
// just going to be stashed in the 'data' SRAM. Never accept a response payload when we're busy unwinding, since that
// could result in reading from and writing to the 'data' SRAM in the same cycle, and we want that SRAM to be
// single-ported.
ioResponse.ready := (ioDataOut.ready || !isResponseInOrder) && !busyUnwinding
// Either pass an in-order response to the output channel, or data read from the unwind FSM.
ioDataOut.valid := Mux(busyUnwinding, unwindDataIsValid, ioResponse.valid && isResponseInOrder)
ioDataOut.bits.listIndex := responseListIndex
ioDataOut.bits.payload := Mux(busyUnwinding, dataOutput, ioResponse.bits.data)
// It's an error to get a response that isn't associated with a valid linked list.
when(ioResponse.fire || unwindDataIsValid) {
assert(
valid(responseListIndex),
"No linked list exists at index %d, mapped from %d",
responseListIndex,
responseIndex
)
}
when(busyUnwinding && dataMemReadEnable) {
assert(isResponseInOrder, "Unwind FSM must read entries from SRAM in order")
}
}
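// Hypothetical driver sketch (signal names are assumptions, not part of this file): reserve an
// entry for list 1, capture ioReservedIndex when the reservation fires, and later tag the
// returned beats with that index; ioDataOut then replays each list's responses in reservation
// order even if ioResponse delivers them out of order.
//   buf.ioReserve.valid := wantReserve
//   buf.ioReserve.bits := 1.U
//   val savedIndex = RegEnable(buf.ioReservedIndex, buf.ioReserve.fire)
//   buf.ioResponse.bits.index := savedIndex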
/** Specialized version of [[ReservableListBuffer]] for the case of numEntries == 1.
*
* Much of the complex logic in [[ReservableListBuffer]] can disappear in this case. For instance, we don't have to
* reorder any responses, or store any linked lists.
*/
class PassthroughListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
require(params.numEntries == 1, s"PassthroughListBuffer is only valid when 'numEntries' (${params.numEntries}) is 1")
val used = RegInit(0.U(params.numEntries.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
used := (used & ~usedClr) | usedSet
ioReserve.ready := used === 0.U
// Store which list index was reserved; we need to return this value when we get a response.
when(ioReserve.fire) {
usedSet := 1.U
map.write(0.U, ioReserve.bits)
}
// There's only one valid linked list entry, which is at index 0.
ioReservedIndex := 0.U
val isLastResponseBeat = ioResponse.bits.count === ioResponse.bits.numBeats1
// Mark the linked list as empty when we get the last beat in a response.
// Note that 'ioResponse.fire === ioDataOut.fire'.
when(ioResponse.fire && isLastResponseBeat) {
usedClr := 1.U
}
// Always pass the response data straight through, since we never need to reorder the response data.
ioDataOut.bits.listIndex := map.read(0.U)
ioDataOut.bits.payload := ioResponse.bits.data
ioDataOut.valid := ioResponse.valid
ioResponse.ready := ioDataOut.ready
}
| module dataMems_383( // @[UnsafeAXI4ToTL.scala:365:62]
input [4:0] R0_addr,
input R0_en,
input R0_clk,
output [66:0] R0_data,
input [4:0] W0_addr,
input W0_en,
input W0_clk,
input [66:0] W0_data
);
dataMems_0_ext dataMems_0_ext ( // @[UnsafeAXI4ToTL.scala:365:62]
.R0_addr (R0_addr),
.R0_en (R0_en),
.R0_clk (R0_clk),
.R0_data (R0_data),
.W0_addr (W0_addr),
.W0_en (W0_en),
.W0_clk (W0_clk),
.W0_data (W0_data)
); // @[UnsafeAXI4ToTL.scala:365:62]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
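// Hypothetical usage (signal names are illustrative): a 2-deep delay line on a Bool that resets
// to false and names its stages "valid_pipe_0" and "valid_pipe_1".
//   val validDelayed = ShiftRegInit(rawValid, n = 2, init = false.B, name = Some("valid_pipe"))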
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
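// Hypothetical CDC usage (signal name is illustrative): bring an asynchronous single-bit flag
// into this clock domain through a 3-deep, asynchronously reset synchronizer chain.
//   val flagSync = AsyncResetSynchronizerShiftReg(flagAsync, sync = 3, init = 0)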
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unnecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
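// Hypothetical usage (signal names are illustrative): capture a value from another clock domain
// only while the enable strobe is asserted, without an initial value.
//   val captured = ClockCrossingReg(busFromOtherDomain, en = strobe, doInit = false)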
| module AsyncResetSynchronizerShiftReg_w4_d3_i0_2( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
input [3:0] io_d, // @[ShiftReg.scala:36:14]
output [3:0] io_q // @[ShiftReg.scala:36:14]
);
wire [3:0] io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_2 = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_4 = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_6 = reset; // @[SynchronizerReg.scala:86:21]
wire [3:0] _io_q_T; // @[SynchronizerReg.scala:90:14]
wire [3:0] io_q_0; // @[SynchronizerReg.scala:80:7]
wire _output_T_1 = io_d_0[0]; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire _output_T_3 = io_d_0[1]; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_1; // @[ShiftReg.scala:48:24]
wire _output_T_5 = io_d_0[2]; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_2; // @[ShiftReg.scala:48:24]
wire _output_T_7 = io_d_0[3]; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_3; // @[ShiftReg.scala:48:24]
wire [1:0] io_q_lo = {output_1, output_0}; // @[SynchronizerReg.scala:90:14]
wire [1:0] io_q_hi = {output_3, output_2}; // @[SynchronizerReg.scala:90:14]
assign _io_q_T = {io_q_hi, io_q_lo}; // @[SynchronizerReg.scala:90:14]
assign io_q_0 = _io_q_T; // @[SynchronizerReg.scala:80:7, :90:14]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_50 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_1), // @[SynchronizerReg.scala:87:41]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_51 output_chain_1 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T_2), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_3), // @[SynchronizerReg.scala:87:41]
.io_q (output_1)
); // @[ShiftReg.scala:45:23]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_52 output_chain_2 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T_4), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_5), // @[SynchronizerReg.scala:87:41]
.io_q (output_2)
); // @[ShiftReg.scala:45:23]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_53 output_chain_3 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T_6), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_7), // @[SynchronizerReg.scala:87:41]
.io_q (output_3)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftRegisterPriorityQueue.scala:
package compressacc
import chisel3._
import chisel3.util._
import chisel3.util._
// TODO : support enq & deq at the same cycle
class PriorityQueueStageIO(keyWidth: Int, value: ValueInfo) extends Bundle {
val output_prev = KeyValue(keyWidth, value)
val output_nxt = KeyValue(keyWidth, value)
val input_prev = Flipped(KeyValue(keyWidth, value))
val input_nxt = Flipped(KeyValue(keyWidth, value))
val cmd = Flipped(Valid(UInt(1.W)))
val insert_here = Input(Bool())
val cur_input_keyval = Flipped(KeyValue(keyWidth, value))
val cur_output_keyval = KeyValue(keyWidth, value)
}
class PriorityQueueStage(keyWidth: Int, value: ValueInfo) extends Module {
val io = IO(new PriorityQueueStageIO(keyWidth, value))
dontTouch(io)
val CMD_DEQ = 0.U
val CMD_ENQ = 1.U
val MAX_VALUE = (1 << keyWidth) - 1
val key_reg = RegInit(MAX_VALUE.U(keyWidth.W))
val value_reg = Reg(value)
io.output_prev.key := key_reg
io.output_prev.value := value_reg
io.output_nxt.key := key_reg
io.output_nxt.value := value_reg
io.cur_output_keyval.key := key_reg
io.cur_output_keyval.value := value_reg
when (io.cmd.valid) {
switch (io.cmd.bits) {
is (CMD_DEQ) {
key_reg := io.input_nxt.key
value_reg := io.input_nxt.value
}
is (CMD_ENQ) {
when (io.insert_here) {
key_reg := io.cur_input_keyval.key
value_reg := io.cur_input_keyval.value
} .elsewhen (key_reg >= io.cur_input_keyval.key) {
key_reg := io.input_prev.key
value_reg := io.input_prev.value
} .otherwise {
// do nothing
}
}
}
}
}
object PriorityQueueStage {
def apply(keyWidth: Int, v: ValueInfo): PriorityQueueStage = new PriorityQueueStage(keyWidth, v)
}
// TODO
// - This design is not scalable, as the enqueued key/value is broadcast to all the stages
// - Add pipeline registers later
class PriorityQueueIO(queSize: Int, keyWidth: Int, value: ValueInfo) extends Bundle {
val cnt_bits = log2Ceil(queSize+1)
val counter = Output(UInt(cnt_bits.W))
val enq = Flipped(Decoupled(KeyValue(keyWidth, value)))
val deq = Decoupled(KeyValue(keyWidth, value))
}
class PriorityQueue(queSize: Int, keyWidth: Int, value: ValueInfo) extends Module {
val keyWidthInternal = keyWidth + 1
val CMD_DEQ = 0.U
val CMD_ENQ = 1.U
val io = IO(new PriorityQueueIO(queSize, keyWidthInternal, value))
dontTouch(io)
val MAX_VALUE = ((1 << keyWidthInternal) - 1).U
val cnt_bits = log2Ceil(queSize+1)
// do not consider cases where we are inserting more entries than the queSize
val counter = RegInit(0.U(cnt_bits.W))
io.counter := counter
val full = (counter === queSize.U)
val empty = (counter === 0.U)
io.deq.valid := !empty
io.enq.ready := !full
when (io.enq.fire) {
counter := counter + 1.U
}
when (io.deq.fire) {
counter := counter - 1.U
}
val cmd_valid = io.enq.valid || io.deq.ready
val cmd = Mux(io.enq.valid, CMD_ENQ, CMD_DEQ)
assert(!(io.enq.valid && io.deq.ready))
val stages = Seq.fill(queSize)(Module(new PriorityQueueStage(keyWidthInternal, value)))
for (i <- 0 until (queSize - 1)) {
stages(i+1).io.input_prev <> stages(i).io.output_nxt
stages(i).io.input_nxt <> stages(i+1).io.output_prev
}
stages(queSize-1).io.input_nxt.key := MAX_VALUE
// stages(queSize-1).io.input_nxt.value :=
stages(queSize-1).io.input_nxt.value.symbol := 0.U
// stages(queSize-1).io.input_nxt.value.child(0) := 0.U
// stages(queSize-1).io.input_nxt.value.child(1) := 0.U
stages(0).io.input_prev.key := io.enq.bits.key
stages(0).io.input_prev.value <> io.enq.bits.value
for (i <- 0 until queSize) {
stages(i).io.cmd.valid := cmd_valid
stages(i).io.cmd.bits := cmd
stages(i).io.cur_input_keyval <> io.enq.bits
}
val is_large_or_equal = WireInit(VecInit(Seq.fill(queSize)(false.B)))
for (i <- 0 until queSize) {
is_large_or_equal(i) := (stages(i).io.cur_output_keyval.key >= io.enq.bits.key)
}
val is_large_or_equal_cat = Wire(UInt(queSize.W))
is_large_or_equal_cat := Cat(is_large_or_equal.reverse)
val insert_here_idx = PriorityEncoder(is_large_or_equal_cat)
for (i <- 0 until queSize) {
when (i.U === insert_here_idx) {
stages(i).io.insert_here := true.B
} .otherwise {
stages(i).io.insert_here := false.B
}
}
io.deq.bits <> stages(0).io.output_prev
}
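// Hypothetical usage sketch (ValueInfo construction and signal names are assumptions): a
// 16-entry queue; note the assert above forbids enqueuing and dequeuing in the same cycle.
//   val pq = Module(new PriorityQueue(queSize = 16, keyWidth = 15, value = new ValueInfo))
//   pq.io.enq.valid := doEnq
//   pq.io.enq.bits.key := newKey
//   pq.io.enq.bits.value.symbol := newSymbol
//   pq.io.deq.ready := doDeq && !doEnq
//   when (pq.io.deq.fire) { /* pq.io.deq.bits.key is the current minimum key */ }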
| module PriorityQueueStage_110( // @[ShiftRegisterPriorityQueue.scala:21:7]
input clock, // @[ShiftRegisterPriorityQueue.scala:21:7]
input reset, // @[ShiftRegisterPriorityQueue.scala:21:7]
output [30:0] io_output_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [9:0] io_output_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [30:0] io_output_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [9:0] io_output_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [30:0] io_input_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [9:0] io_input_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [30:0] io_input_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [9:0] io_input_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
input io_cmd_valid, // @[ShiftRegisterPriorityQueue.scala:22:14]
input io_cmd_bits, // @[ShiftRegisterPriorityQueue.scala:22:14]
input io_insert_here, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [30:0] io_cur_input_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [9:0] io_cur_input_keyval_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [30:0] io_cur_output_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [9:0] io_cur_output_keyval_value_symbol // @[ShiftRegisterPriorityQueue.scala:22:14]
);
wire [30:0] io_input_prev_key_0 = io_input_prev_key; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_input_prev_value_symbol_0 = io_input_prev_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_input_nxt_key_0 = io_input_nxt_key; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_input_nxt_value_symbol_0 = io_input_nxt_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire io_cmd_valid_0 = io_cmd_valid; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire io_cmd_bits_0 = io_cmd_bits; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire io_insert_here_0 = io_insert_here; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_cur_input_keyval_key_0 = io_cur_input_keyval_key; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_cur_input_keyval_value_symbol_0 = io_cur_input_keyval_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
reg [30:0] key_reg; // @[ShiftRegisterPriorityQueue.scala:30:24]
assign io_output_prev_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
assign io_output_nxt_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
assign io_cur_output_keyval_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
reg [9:0] value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:31:22]
assign io_output_prev_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
assign io_output_nxt_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
assign io_cur_output_keyval_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
wire _T_2 = key_reg >= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24, :52:30]
always @(posedge clock) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (reset) // @[ShiftRegisterPriorityQueue.scala:21:7]
key_reg <= 31'h7FFFFFFF; // @[ShiftRegisterPriorityQueue.scala:30:24]
else if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7]
key_reg <= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30]
key_reg <= io_input_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
end
else // @[ShiftRegisterPriorityQueue.scala:21:7]
key_reg <= io_input_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
end
if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7]
value_reg_symbol <= io_cur_input_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30]
value_reg_symbol <= io_input_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
end
else // @[ShiftRegisterPriorityQueue.scala:21:7]
value_reg_symbol <= io_input_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
end
end // always @(posedge)
assign io_output_prev_key = io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_output_prev_value_symbol = io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_output_nxt_key = io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_output_nxt_value_symbol = io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_cur_output_keyval_key = io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_cur_output_keyval_value_symbol = io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
endmodule |